var/home/core/zuul-output/                     (directory, owner core:core)
var/home/core/zuul-output/logs/                (directory, owner core:core)
var/home/core/zuul-output/logs/kubelet.log.gz  (gzip-compressed data, owner core:core)

[Binary gzip payload of kubelet.log omitted: not recoverable as text.]
mYHPkWHnesUD;p09.h8;cd+Hd{#VMw-/Ԇp>ݙ|t 9?aX0b4.jL1 e;΢*р #uV9.(Gh{+E6Z[@ yNy"FK嚭z&L;o][MT `+(!|@iâ^ $ڶ CJbS^Ԗi]FR""'^q<-Cֺ깋..zf?C a;J=9E(F+`cއqyKӫiMN( P4XdhE%M(R!c dBnk9 3rE؄ AVQc(yz$.Gg-Jki.8e8%Z5)L SLÚs""x($Ё#[6p Mh|BA]koF+vv=n 4v؝F:tXIv3[Ô[%Hשso-.(tB]X.NhXDG D[CW}v_*䪣7HWB**?ZCW:+DW-o<]e[+I kHe## ӆCƳ("CBgFtF):#-Ҵ 5tʧDUF 7HWZ i u$T-%tQ"]n]!`CpykVD3Zx(UЕر11 CWwZAkʊt%+cJN*گ; F"d gF Q ONm+ ~.жUF+/2JɽHW@fmN3`[CWz~h\NWe$] ƤR-+=*Õ-t>t(5]E2T|t{; nuy׸%+j`+q, 73Yed;vťbVg y8 ) l8L~r,yBHࡘIuC 38k0Q~SQ1'u,_S'CӼM.WW71?y>_Xqp~#"hai)<1 <0~R+kVw˖.&Rp]ؐ F)xԲFBײ۲e#xNBakͬq$#հϝӂsAOATgCz)ЊȒC2Cr?zΒ{bZ6U,Xk*Õְ4[{t9cM~ ؘ BWh( ҕ6B@`ڳpkό utjJpt V6k}hCP2հSt%;zlӗEt.UF+I*vtv芁ZDWX8֨PPj ]A…m]e=tBP'wt 5ڴ@-g W2ZiNWe;zB-ZDW2\hj:]!Jִ;zRF~nF_7=xde/؅ͰͿ帣x4(v &()|1!;5R>s 2&՝t4~:UHk;Q^],~Yǟ.KeoOqtr/2N}?)rHǓ ^w%EtVMT/P]+Y5E+rU-WgO#ﻲSx ojZ^cw,络%?# ,2U/8 %S8w aS2M7ް5Zxm|3!1s~r\ܻS|6}O{rw/jN'._=bZ[~}>ZQݛb]YٝOefE~mosݘj'Md䯊8&t?s˄|ڡKJC}>ZʸۧQ,磝R̼ah3yD>7` I%WB;^d-8%,ۜsYdq@1jrzrB:&=5 2]OS2Mjn7qOWAiRTUaԷ(npf%E;4U~fD ~S??AR1659SbQ%r=|P|̉O֒iס%7vZ}i% PRj"QKXC#w&@8S Rpc)trc{J3U9i'weu8`7MQjfNH ͩ]]^?A Sirlxa9兖F3i!Ku7ʻlo}s Cay[ޫh.?#WBTzILؕ}km9;FN;+hɍ'/B;*) !ΦuDVqeEH  pQIET 8H2)FSd(wh;dar PR˃@`CTԪ-pH 镈VŴ3/W0bFE 4N/-ez9qvU]qrсxháeߧu;dbBd WۉgV V MVwѽCAqC/uP.*i܁QjFĤ8 mt1Df .4ѥzu1xx` D8.DxP IfEy:鉶6S. Ύhj1gxрK],!ޕđdRꗙ~Hr,gz0e %U4Uئ=P@m@iH6Pdw"΢B!f̜%9ʠ+FD^Z_M7ֻ#J*MhEg5CC( ~Y /0E7}]XI[>z2> dhE+2*mKN{Mw'n{3^͌:#_տ *q1/$wxxrx}) X ϸywr1OK8~ZfTIZYVa2A9atO޲S BD ˇԧy:-2"j9ϥLJ!y!KQ[1ULssJ X+$8m\bPYQ6ģ`<Ř4RF+XMok1ME6n;` E}v޶4ҁ`UvVuܠ]nTһ\/ ~ zݿhŚt"b՝ܮvMJ7mJ1GP/k]tbŇ09;}KBmBRTQ V5r h70;;_Ĭ3˻c :a~}/>5=f^rhQ/0Zgh.QF!~50Z0 ^Ff-7s~]\n76JހrA&m.{Orma|Ra>$/ɳ1r 0Do%#SdVXSjءHȍ*P|^LơױNFȱݣWeq\1Ș@:|j˳ bQd)YuC:V">"?Mګ>`b*;6Ӌ:\rU};04{~: [~&ǜijLԡLcЗQchD66-^JF ?ICcnbD`! ,r)fCVz;Y{l_=$>goHCfA>5Z9YZrEqsG~"g f6 d#* %oү\M(cruu04(']4農@~! 7&mx )oW-vWcn8 m{wtay烐WPhtQM k \mXh[iҒHޓ!U|V$lڸ kUh@JX+ b1xE2u`XXɻ m,oV?6~H6p%fM`EFnO<cBT r u)+OZk^fݥia޶`UJh<O8|*[7(e8_1jōIkiUk9zf$) zJiVWzQEJj,Q3Ok5%zB_KB]o&![EJsE] iYr$O%g'J FHA]&**jfPtnIJ/?UxHDRIkKfCeEF~EM'i䲜ص=ίPMN^ռFÃOgWm2,:2طDdr:W[ILa-GUF(Zu҃3 ϥ5ۦ` J ,`n&^Ⱥ^ښcYh1K.NVыR9TKǹHն9{5RVBS>-\QTmvYf۳iӫiX8<7M>ٷn}˘u;'&IDҠ`A@` 4+ ˄xv.PnsQ0齐MKQ*K&FɷcL:Y0vTd-r[l?53VtjE"qsmJGrB#( QW|4rtv ZPe=h"$2̡jrƴ1ke4 r3>g82/;R?_AEC aT4meUϐ$%MGHdjf8SU]_wWD%UŦ*.u5"вvHo{R=LJpwZ%x')4Ǚ0Pg]''mŝcW[0ajIT׾8K ;Js@Vqؚf8v?m'2x@}o@OoSB٘cSPASr)ՎxXd6j}D* =?sUoZKQK}MiCGU7W@` :b&mmFAظH]ȫ뺜wƣDqμ<

Sf+1ҹI<޺̛:2"u, Vd& l:Qig&{R9`Fxl".it*Ř𒣻$9u~ ^g}z|1OҲկ~maUbLe/3m)BN>_YR38Fe ƚڸdkͬc+FX}(k=Yߑ}b1&% ES1]ȍ I `XE0X/d}skڇD>MJou8>9\Zn^(KKFYV<\B85ڨy hO 8\K3#a&qA]ERac*+CğʹNADr6BBtr4H"=\DpXz߽h}G;1TZe`}$\Fa| 2oENeeW'#Ds }DC%ĩFb $~>=zDd1'$7*u3؏gimQGTt?a^;_'z8Vst1!G+,XQ5p1f:c+9eF˪ C |CuGɫ>M. T#K2QB9aDZئgnxuLġfjI䍣qn D鸸Z0>nqvettre(&fV.:[Ï:ih__vprv~xPm4JKwmC.gw73%vPzUwy^Z?%_3o.'o:f_uR6̙2YnGH5~9QC;@Ȳ/m Ζ5Ö77sSTޣʆ8)GyWn &N륭2C.kuٳRA)8Ӑܰ@[_6*Cz@qo nCl0NW~p~7|&~~{{cy}+q~K6¯u@ :Ux#fMs hZO=K>vV.'|mϗ_^K1y;yzDD<W6ףPWۨg-Rλ~zW+hК> fnG{Kmwc~6b%>K< #ѭb$s2Mep"6W;CD'R)8@\/pf&n]\p  "pL,24?yM. $;X2pAi|WJ5Kܟ.Q*d}Mvv뇞vg AԊ<]t@vՋ__i4]tݮ} //_tQ9/ᑷyp(xx6ڨÂw_X6l]͟p|Ǎz+nog&B^=!V.zc^syq4$\x Oo#ZAmk " ^z; gtm3slm]?Q;~e;uW͏g~i+٪+T4ak:{ TTusyfWwaԼnAX]!wbiQ^ i(d37cƮI%V"; $Rs] ؐLo3xP}ڂ<yt4:U? ^\gvIZ5  4eT`;c$:'-*uxy@e$\-Eѻ/[i~|9 bSiٟF=/To V{,{ݻhy/Z]-vuwEpxQjm,`$N=0, =-|XyZsyv/jD vӚE\c5IIOźbVVs7q>YЗ:Hb$@H3 !(DGUڥYޚGZuy>^rү>ݻr9Jz@YOLCʓ$eP-T,B}0ej sDN)}aE-֝5\^\ܥd w6Hs).p8<4ob;!@i(Xr\a${zO:^ B̆qg,b)X"$j2 ]\P  ALVeF$HEdωs5E4" 685I*ʃqCeTG$@!yGF?~\%"ǟFaU)#z;u.Ou3 #V rVH\HH@B(asVx%ȽS+ٔ}kM}b*90ϗҖEL%=󏃶$?ߴK`ѝnwZ/˰,.!TA҇*&hd,  >gRX2rT@ dZJHòҞÖ˂/n_tzl?A}{ W.]i+PO.·C;Wg8 Qj5"V ZCJR꩔R+B[`Qlfvf`;0xȒǒx?ђ,QR+q5EU_u mN0{TGT =z~l_UJJ_urzXWU]~ݭ_LَG8f:oH.GyY)B"TΠE(w s*r>gO[fJ`(AʅV()5hXC#w&@()Q P)1Z:L͗t{.~?ıLeX@f~^=ztlF;|g2DJN|Siqqlxe9啖F3xHgGP(nQE E* y-i eD3iub|yҥ+ήM)[JAQ Q̗mvM׳]xf<uf G=>C2O5'[W֫th5iwW[VrǹyYw>;ޠ祖v4ZjaΛF 5G%=K^QzϛnZ om7NpBx_ShVmuls&Aޮa?>+`$G*M$Zqf+S[4E @)8)ŠU :4Z= L0ap+ZKty@#ځ5}%DWVDT"*ZD) 9JF8(u LaMpǕLASDZ(Ujsdrcg>%8AYR<ìApQѵc~G0BXu* HNDО~Zf?W?Ie18ѼZ'q1WIS V$5"&Qf@h!`4Vp.{udr^ޔ䠶e}| nf2oguyӍp &#ŝFAi`&bDa @D5 h:yyƝb`:*Gı<s$6t2J5& Z;͎~#W.sw AYw`eg$1귄9\[ "952(Q$L ȤFqǂtSqԄ䥋3 F$N,-4_߽fý߀?yߧHJdV0h +Az#^ eAʆWwJe5_cvr{7%'/_(̋ސ{U[EvąUmŷ.s+JrcfwcQqa]Mz8pM/˾$99vRLPL`'S*+٩*WJ-W`+4qצ<3zȏ\gm<߽`1]gޢ|G"Ls8Zz _:}/obו܉r4KWRUkkTfքNh%xGSeNU0`!$ j`8 ecFJ6*hJ.{åԎ73 ֐L* af@Qae/KaoVlX8CEy1-<[w$ӏ17,hB=o5ӌ3("qVYǴWplݧ[w0ا[lx` T4\@P="s" &y9& qTSW2c3oc38i 8DZsyrPgu8M|$C%:'솜]*9̎ Tgq`ٳZh*yDlByt|\-_qbV"PNz̷XABF8Gn5Yf(!G~̫wpp?_Ødnգ'4j\,F8<@Ʊ7J4퍦'yS%U"'EH&UZ٨BڡAALUb(d lyu~ I Mm=Nnbr};( hKED[og?b|AV\Qۏ=2؝oBz0*+|HdP Tf/-SjJ`/:kRѪ|XM2Rة.xLx;㹈GDqKo}&c;A0hA>;ih$Dʜ6NmKy)uq6%NT8Cdt:ٓVد֧<?qq٭kHkʹ䙸-.qqŝUր;Ā&^yGX@%`k)_c(x$/a7Qa[J~]^~<(\QN _'X[ T2S8c@jq9- !19##QWStE* X$xfh0xiK>Oy-SH^R),1t,ӼSњQw@`s;iLPC,tVSG)>"ȩU¥NJs-[Y![piS0Z/,s*2k)M" ~lwY3H^1jas} .Jᄬ}.h;o+? 
8$,Nhev9@TYZBJxe Wd$5c=I2n}HxHys@CMYW 잮׳]rhy<}2T"_~`17g#_[wֻ9[͞girˎ[nv}m󝖻a1osut;CtcUsSlrΨwQ7M-0tܦA)y o'7û x=7%܂Ŷa'?A <\ ,²瞃HYe4iܿg)%֑wO&L4|1eJ,cFe25:X¦tP)TKÀWX;5A"H:bisZ2ֹ=eL\̯"g[(ۜtd7iFgtp^`1䔔lLl$vI;[ r&M5Πa&8KEҿ89ң ,[FW7kG>s5}q:Q+Ϥx^噽Ȏ^yF%аN7SE!J/BR*"4?` Csgn~=|O)qrUքʦ:!w wB.軽PX) <VLj6S.Ki?B8A{7^⥂ht)xd%\\ )z u(Xʁ)RE'-,UT^g铰Qn|3q ]PO_Ґi}c3ڽi;Ӎ3~??s[_6?N1 !Du &*g6|,om#2Puy#0  Ƨګ I+H%5$^s%hEH R/\"DE(ѐ ;+^280-.Q_8qox2/$R(h06j269e L6=,x|q1гzFywgU|D*+Ybf#'+bAHe`8] Fi<8z?yf+#Ǜu@%P K(SL=bI:oQG}ŋY {cO-qdhPDZ{vg_yÅlėZx5u]uNjDo68* $7VZ8@k,Yn(GoH- &i!;^n{8XIY/Nj~qZ㿺lV拋X拯52FZ_ٽ>5N,r-?N~_^0K'r-?Nm0J a&5Psfrv}[+8}3Qi\wY]^S/hUeZ7<6pjIlZ6?>܃=lyzzϡ%489L)Aԁ/C!Kў1Q8y~'N(Hcl4 F 3=YE4Yv50 ѷ&}dIfImIuDQxpQ"'KcLC{\~aWvg<1^ӧ*Wܵ}lbr6dhAZnz2v%1#_}%32ꃠnϻU 辰âe&]|ԡDiK<)M(}t,Y{ζ;8l2*P 6J 6Łp9ea#(&_Ej;(HNQavH2&)J ##?NQ农l;w% e4̃ RzZ%9ۮz_gqAF*_2`uP0mP)RɲEml@6ȱ wmk?~V]ܱ4.rFlڳYϳu``ϕT ZK6vP9!Qq&0sw_hZ|Z 9\A\khyZ(&#I`%'JJlG#dQBl*a8&D [1j , mbLT"JY*gХupf3q3KFBi[??Vµٲ')+QL)EyY-|].&ϺX:e4ꎴ |f6-2< 0ɬRH/@{ }Jx٤ ɧDb2%/dXv<  YL'Rf'tgL]tq]ci D-,)SLQ(K%bY՜d!|(q JSS@QJdh" GK9L:Q(BV'y;|~U3\{5z;->EQ+FAzOd@kvVwj vןLM]P:;2]QFQ=.uھʮ,` Bw^/-@XMiÐJWz}כEa7ovA޼dLH8UZ.zɨ ÒUR[Xjtw_ׄAlCgO~/6,Sa~6gOU,W_u;c٤VRz{yxUwV/O_:YkmG_J(􇹞,ӻX`wHlgls%Xȉ:v$,RU_Ugxs(xbEkttyq &ԁW}hM#{MM Z <)f tRhD1(H:@EGooZ|k!(樁H]D-$LhD胁흇a-֣zѓe<\sb*a Ek e5mÄsV:gm%.[q5><ԥ׋LK弊aiN/zxi],뤧`;*hP(A%KGo()5Kϔ%᭷_"GSymHeicXV[shƷ}P(K̾Ys}ځϧ|!<`peEH  pQINNo༬JF8(u LaMpǕLASDZ(Q*FYsvuOmZr/` Q  F : Yw;:5rȅ|K!Y]Q<lJ* \6 J3D# f'"19}Q]2/l+^1bPB=,$;5'K5؀ʂR(mV+^_j|Pm(~aLp9\[ ARKȠhD-\2 sf ^x&EUO5ϫ<q~dI[6wIjFr(N%z$Hrc'aWiEdnU<1Mq>1gxрK(E}!DID~&{uԷ&,]EGMh 6CC PԶe7t-6xMbKo_;xTs{V(PިDITF3+'Qˣ~׽oM#ݢ÷9d:q~>~)Oʜ|FD<SP\4Sta>)n #=a'z{0R6~ 19ӏiåVԜ7OF+)8s>Ycc? DZl2MǨfk/߳18)61RSƷxC9\o䚤Ȝa)՟ʨvSCG.kj^r˜Y]_SfXt%:-ŎOfw4cٗ;0My;&f$J>+6wgF`3A&ɏU=&~~^縧1|&'uY*6ѳlo^/;:Zطuev}ko& swg<֝~Ҷ:`{{٧D#n r0k 5K sg< *hIzD+nYh ii/o@fp<\y1`` KBiJmTJ䲯4h#Z^鵢2t^گ5/lk%{{ae93F7u{:H".켮r -C)&{?>BLJElpQg=as#/ =D*OaٹúCEcu' XC'D,>2E IxyBCPBu&Yf#tҎFj㩎F&!oz'ZZ\F0e9DzG 6M'6}!?.>~n`/\hA&Rଃ^Ui:^+a>y{z ߏ!u/[)zp4uR̂|ST<>Yl?Yu*œrNu5aa f .9rv68iJF &%ɠe+unx.%j|p/lp86J,u"fc!IתܓdJPBZA=N#`A -[#~nf6in,Ǚ!ZK/*`k,n/w{ٷwily̯nzQP* D0L ϕ cJYPX"L8J V"qe 2FJ%5NjQE!2aC61M-Jkiq߅ tGR3Ne^D\%\(4hI*ڴIAq(^(* .j ,8V \"HT')U1A:%6Lgհp TYgĽ-*-W%cto/cZL$,r^>YMdeF}.)U! A$2!FcỊ論o:t&;A;e܁uw`Ϫ,9%Gq}1\~*p*Td@y%,">@e{^@yVZ6Z7<( (QxO0\mԎQ<\EH QZղ};$.gJFɃLWIi 6 *vE- B\d P Y f@C ޜ8b#̶ >OIr' WB9!Wu^RM:9$l K&ҳb78k>)ʀITA0> 92MeyvQUڼh8hT,BOAgAA )7 LS* q̭kd? 
ߪjUH+do͎ (-E'PHwy'N$F:p7ëP?8Pu}٠.6#.(krt$#;őr4 +SkI䍣qm# q;Z0$7|y3:>?<˹ĆgS:_׶T#k{nC''} 4JKwm>aLq#0ˁM7N\g+UVu:QzA@t-[t5ѵ$m}m߻jA,١_.q~:wpVZ(w<5Jȃʠ?ZZyMTx*j6(*DӼ1[ftPn$lRNhK$!^9&/O"X}p>AS\Q]tFvITJb\P.J$AKeGZYnze˛*_Pf y}Q hKMIhfNre{˵uȱ [dR=?rus>/:Xuyٻ6ne,K[eIsmIzp֍,9^9{w%Yd ı+rHo3acxEay՜?&bFrt Lԋ~G, t+Bdh`6~`X!IP&sJFƆT6G]][Tk<Kfڅc.::kyN: 1PKSf?ygL*'-*u-xق}(W@,@tqpߞfqA|{ٟF=/k+]%߇O,jw?{ehghs_sزXpB{bgnq|4lNγlZ3xtW3͋y0kiF*YË߇W7k7]IU㻉OCa]XN2^JЧd6ScζheƷѿgs]G1`(ڠJXR q$w'=q`:2WmG%xXٲ$Bǜ#vI;DqCPbliQm|ڠ3[BtvIk^VrO/H++ﳃKvL5ԱNܺ|p3øY}~hWerjqD*!>T>%yr3u~ ɨyqy p ))ՂdJΡk(Ebh18o3 RHF%S69k5Il1lM-c֚=rlqUzHa\2υb rmjpTP<1.s2]S V'j 74Z$hH}Dg)Eoy&O/k/R]h&XؚX8OF`Du"rMG:?ɳ %Q5U-y~(UJцGcrk^+ùUELvfettjU !9G8ehz2g*$X1PΈTrKHm[gd*4cW[Zj g嶍g|\aMjUu] o^(`04W/b;!@i(Xr\a$3zO:|HZNy l`;8fd>#1A%vDD02Wӈlh XcW[ںڝV!#ёP⍔DLY oqeNkI:48(C:xAjzDR@yThkB$^ȴ&OI.LU:*6Wxkl5/!fø7-"-,bgi2r32KٗQ-DlrMH#ёsp^/b́ᬫM]Wy]~3.NqF7ϊj^;.F.qq!/^,ċ (~Hم 7fV}ViTVqкm>Z3obb{o/ʞ8σ s6u@4o߾*ο7=F U|&ۍW }7qt}mkNک6i[M4,)5t"jèydV# Z_@ZT2.$^ZNy%$LwG_)*;UGT|?V Rq%*Z,RI8kIkQfNPG!g>jm}_}su~n(z o_t{o\+Z5c׺/:^W=V'IK|n~l^_)I'zV"f7Z%}LK*.sv²?.g\Ӊ)%x4:^An!i(U8#ʼn%y(l ;kBŝG9|&+}ɺ@9@}sADI4x `sRGc)'vYԿB$iS-ML$IIUGN͉i !8:_tQF&Riy1*A%N-0KhD gAAӣgN$$ S0!Nnvurx>kku qc4_X|1wk%?8Hf[L(L+8G/o3; 8c1o AdCÁ_pWAlxW,7帒*Cj90J('wAzJrN٪npݫh{q`ɳZh*yD\rw0Guqak3RQ&vh|~q4Ҡi*s6/a6LFY.+\'rN˓7sT[-]4ޢg˫;!e3a0 gMrsӳs#p4j\_~7A~ ʚ@,|U5lu5zu5lQms/U|4\O;k+ked}FuU[jrx`ΐ\@碯XRb1Ba]uy\7hN^'O'/_9L7?qj[)z|]S~Gբi\UKf'M&fAlzqa_L#}=pTTPΛ5b+rAq7"j(7yhnwّA} u=;t_G Wc$#~ep,PP+щT 3=&Fz͆+>n8<nW8CA6Z{ğ& swڱdםrSžPlM'vL8>F ]ϥrWYhJbŠ{0/XRKpt}  H$A1l  \DˣF,8DJ Ņ@H©I@e$gSG'LIExb-oC&mE+O`lMMZBͼ؂h O}vp/ɵ(m `,_;\! kCAMO\U>&ՂT,B*}0ej ;CT HRXy {DAOOVl:uG9J-T>j.ӎ{o.SɕRCٚmPFrJ+k򀲯AfxHdRQYR|TDc 95MZO=>$T'OL.P7?D/Bܪ9Y'!sD&@.%6kxLLBf1BM[i1K.zʾILE)ڜD缈HFjlFz\VOBS ;¥ZŒ6i~pKiMW?yƟLgo1D2fB3(S.X͊[x uFX(9S`QHQ`SuF1Rf_#jq|AZ㩨m+P{`kGtb61΂+y*ia<C0 V3FF0 b MU$Ek$& x,d)pqS}k \ }'*}TuIpC e?j]ǧxn"66D$By"]"Q"E"sTeH%m(DT%Di3De=w]yE2E/MVZc1<~fN dC`Rf98I An+H1H=pjosff'a]Yހ3)ɪF.Wnߣu1Kn ŮJ^@TJ&gBJK$3 ` j_߆R=+ UF-A jfM[i*TqO+ҁRQ*,-T_~5V!@\sOQNrIrS w6R{@ޔ oֹ|`T&U^Qm&NO]ȧs ZI笑9<# 0K=,tz 3fx_^-fs VB{yϧ{0u1v4ڿ&>/oW t>|{iLqK\Mҳ\~Ykn]gYif+Ekf,+WY2`~SKܐj9;FCˊB~I4}!M:ѱE-L̮&9 ~q4dv5/={#j{FMB7oLOJʎxIرL !gIX\6V3*9+찌~1>hLD4pjAY 8Ot1hˠW[Oͽzh,9ivC,}6ִ8ZsKܶ멝#3ߐKHZIZ7ԺҺ^==΋NoڬO\:;OZ6run=zm.X٢;-׏x9͘Jh=7t=wvu_4ݶ8LYh3UoLaH{em6,%Rdξ/m'"ıLS䞼lP h1C6=cS!a\GhODLog Wmf!`yS\ h*j%F7l0q2Վ dzY7||>Cy8SowQ΀+6`GAAUHq&+Jc܋0apJ۱uwR+BmP?4l. 
bcC|B '6zFYP FYFxr㼥j0YߠV%#!y@FMҡ>4:+"h*82*^p䢕j #}WmMb-yb-g)Y+UJ])VM_Rԍ^IkIB}GY脎166@jmɮ!|U< (O z}z6{RgiC{3ϋɖ -Dp&zjT&&'L1^IeNAGk'>%=@?Npܔln}DUkM/\.;$DF,A#o.$R"g̔wtA'lve,wx^^X4Y}FhЊhLJ]f>Xp6;cg '0=B:pu"gySbx9ӷκL-Vq(dhQ"E#lbFVK5@8p.@"0"zWNMdF˨>RxZ+ YkTvXWYvܾ^]+=l<ʮz(s5GÓ<00OK5ܹ6FKGR9"915N3z9*j}2I_Jԭ5G'+|^px/h|g^I{>v3mٺ>:B;0/紫ċ||\rU[L9[uh srcjPOyzے27S1Ѱ\6^S6(Ƴ`yvT>HSdY),:.PA;Kvgh_aa2N(ffF,![LG!:kMg߁џTr4oǗ@1_b:a5J1M1]m{wAq ;Q7R3q6Tk~B'~d2mJRr|12͚Ϻ8ޯ| [ ,*=Cl츓f+W}v{zmc۰p?N#FH.sC RLyDE Eo}.۳_.W-z\;ONHFdscPAKON=9=Ax2ث!z\ᘢ(CdRDa>%4(5kt0('?,M>_L&۳ vn19,s5I3W~Y ;~vLy20Y&z-,_^"}ʕ9BD+ T(IsJ 0bb`ZG%r˲8 H#\ٻ6rWX\и|fSu_.m>*ƋDjIZ^o{CR"%ICUen<~:5^$EI9# ##?N֨&d~o_ƫϳySjNZ>:bՙ\Fn}Ʈ6٫+ƭǗ |湾ЇĈ5W:"3^iKF+] 鬷`=3 +Ae!gH@0 6eecߖ$/+M(c2hhJ!_sUDS6HF8tlNSL *=`/&A2]uwYI,^в<KRLPJ?@*'^"t٫@5gU+8:vRBH:V~`@ij6A;oc̆rۮY_2&RiбұiwT|׺]^{ m݋auv/@IPW/%]1?x_J/r, "4U!(c)*xj3DDL P-Ct ǟ\;32{Nxsk[vΒBЕ, ^0*A`[XqOV`)B,h7$]*ôD%7[h.Hm"7Uj :դ4rǣ9W/lSn ?F9u//Oɗqd?7iT7Q\5pxQ`j2Qĺs) +A*m>b1\ⷷYC7hD09Old@d%  vO{TvUpV61EaS/& eR=:]ҀguJ;@\uhй&m|x=|??īiSAzǶXJPw^\Ә[k1JᄬQt Zd @Z2*i:Qei"NHI*@&Td$՘wGOr9[ST{esNRSȳRwE2P:xC֑=6dbbt<~m{kl:ڊm?`=/=v{1kpz >fގIGIڬrM@h_ځcowU_zv8Ǯ5*|}zF?ȫ-{]d墳<{HTܗ,ge}µޑq'_zh:IwCaC|Pu$on(w׌q䛛o\"~cEO[-Zܵ|F)?~e|?JUu<K)Z˗N}VOބ]P>%u4,B6'8O =э|8UIf[rZҘ~Ns]Q F&i 8VQ)(9z)tZAEK=<-礪=OfAO (v#v b;A9sG Abcr%a`MvZuprJJr@ $]pàɢqFʠa*jzHG"z}}[hj;6`vfby{G庼8ݻ8{hڼ!F:ag #m.[aiy|``i Wޝx-..)ӯ>&+Rj0Bn@6oB.}S\Bҟ{N@*?x J4hR@JorqH*-cB0ڠJnxjV]N>ϕ_%NWc^׺"nRn#jvlӌ~?tr=4IkUj'g2^쬄8L4x#~4RzXhUiY&[Ӓ+ָ&qϘyMiRs,gs~9L^ܜOVOgqr;dYyv+h&e+:#].8]-|s fmwHt[~ati܆ yOП%KXs%V`mׁBîSbsxuako۞xǐ5in5䚦Xc״K۶<5"svI]^;m_NfQ/>_ЕZrtqojo;S 7Djq-(]<|9`f .<;ar3L$a)8*ơ,zc]ޠ 6˄>:BJh`z1zUc9ofy_g۪{88|^m>rg& OV21M Q Q!K0RO%fA;zxϡ PxL̀i jKl$dk6Q8:V1x=c<^^x"&Q ˆ%fdYE4Yv50 M5%($bd$G"Iin=0@pyDNV!no9$ɽ }~}jkX?ۧO5chu+O7㡛MfAbJxU)X-gwfzP. HAu_ d1UiM<< xZB_:z?ՙ >dJϺ>P!Fr<iAC WT<{&J%x%y|t|./r'ݍU\ F]rϛ45 ϖLZKDCQ&b$lrn昀_ӎS jx_ӉS?k.kngYsKD/B1!1d+)H7|S(Oi^08y/c' kTBrRyoُڥi@Nt[>zRՙ\4Ifըbux~hKP_ }H(8X^IB)g*9,[YoD_naye4sV*%k%5Є> ȪlMYIJ$/+M@14t4~h][F8t, Xra6|5  B*+E ZgBQb QJI*WG=m ]i*'^"t٫@5gU+8:vRB:b[@i2+RdY@" :vB:6ͱŸ*Z׾wV W _*e4-nfV.b=\Iոdc ,#%OUՓoI$ߪ:m-'#IJ1%bY!d+{hcj[1jM_@D$="rW9.}ח6Ȓ-taO) W??V$cԖ{/GpliȜ#sV톟,鲛c1U'͆xK̒I?{2v|}~6^Fxݐ lNNpQ(@#a)F0dҰ"@Y S"K1D ˎg.C˵4ط>kGhr|aQmOh36&|_,?t~XPyD5/R1PJ]B)`(I0)^J}!k{O`TXr)eՁDNAk:5x+zzU~셡g@0Ы!`lC0qTaj=믠4όN4*|e>pU] #Nq߿}Ukp2TBU$hp~&{T &K'>&nT.ή~k~}r1# CxVK- 9C۳T_H5~f| Ch]Km-}\ YߌZ̲a4=V3~<^;nsq0Z*A:u{VgrMs ɥ@}1t&TX%8=\w޽)tfQ{r0:ߧo;o_~SL˿<};q=0 Z" x#]5 ͛mд@]>7ivkڽk>_9ŀ ϗ~7Mj^B x䪒1 i F/©9RNnv,?nK gQ}Zh8; /pYƛŽM_Q1Ӗk$+|"&ZB("3casVc#0XpĬ#kx(aKO6!}pZEj$E Hxɠ#bNd{,hʑNeM5QԙNlYhy}EۮK؅|Ћ7ݪLi;X3HۙU>Myf$X:-$תoKKO'GmVN:(om7b_>'W B_Sa{ v %_&-}bilջIuY]dH2RUA8<~tb_dl"  &#![eSo?p8[+36/W+ Oۗ/xڞA-5ƤƺªyL)1\\s+Bؓy j4YVS!Ol8]"x sPp-F~[e;[myR]O(渠"8RQyI7A8dNt4(pa"#)]d+S]0OF}3Lk_H>x݀f^Fj6}ͰvAN>6uHHXױf2:5h#$e{ %)'7zaՊz5%(uɏV+Q D-Um[e&#QHYu2LwZ D佖豉hj4BZ":v ;g{!{\EFoOfȴiQ.2xBccc!4z*RSbN 50$5I] Hx0r>=h1I(3ebroDul󰡆I-F&Wڗde.rM'W9U{M1@w$ :9+p[Ύ=p?n檽f+J)*?Z!\{j)1[Hc":93p28:<0LXd>Z70()&=i{Gm7A*3F٠.cgJgXؙflc,  g^&xḦ.q3՝ߞPpy8huĶcK}sk Qb&'Rvg%RSg"i8 Qu0 !{D%$ؤ"mٷ"h/#vHHLBDNW#aĶ_ 1ҎmQ[uڪv`xn0rZxD oLbCR!ph`Ql y)F.qVq B3  D9IX Ai;aey$OozTsЦUdf̒>S )ۖpF j}s5r?7AO4Gh6*wvt{%bC?ĕPAZAľB?*:$WI\@Z-ÕB1^++ f &X \%q>J plKKB%$]=N\*'-&zrJ?tm_=ZRr@p` }WIڧw$B"X%z8>kRN,0&%ij'ru+wWNDxkXV2ʣ&bon' V9=XJ@Re J>QR2}'~йMJCEͯ?}÷J#-yW'GQurĭ:(B^I'"HCs2Ke~Gq[Qأ(Ntu*Ԛ6ʠHy)8RHdcbW!tS)3)Z~ݛ zL-D:oL^P-4MNܤUBleDUlOog7o fWJ (7R٥͑l4x-|ja-C'F{b|{OænHs7eQy*h\'4iꉞ.]SWvZצb1:M֐1~<)!,ٸm&Q$3׿q ջ.|׿՛w0QWoo` f`, ,fb``뿽]󶺆ꮩbt-P&~iCnwS}(_,|-oFԥӽOunlqZk&Dp%s=򙯿lK4s׹ /f }aBLӹiN/7]/vGģb$|?y#xDːPbE\(aƒsjl8 uq% .Fz͆qgFOXd\` 
p$0dБ[1'UDM4^ݙN+g:0;7đoϧ۸|s}*һSyRyU]γPn=m~)oG0}7՟~+u&mt#)6gF\i7ƘlR:4mdyv-m^r|.|Abu_[if_ӭ˓ t,Q]i7[ch*  *? lp6HF_,i/;^s%8JFW&+>r1ϸZ?߄0n|?!KQ&K X|(,[6Lra }ӫ//èIoV-v[\Ʌ{K}?[-_>LZXzuC7\)S=Ôu0RwܺbsJj^L>wLn;BnC:g<'ڜrͽ^ڈSg^G]1zFuyĒEq4rK18j EP%(c"pFkhreqփEĖPAP1Bs4]-v]>ȒCslHƳxSX4:uSnFچЪS4-;1<*Hj||2ilHO0YqrE8əNVdCg*']JZfK˸:YVH5BjJ6*N_^ih|J!^+k)L@yM>jliX57V"FMsFV;|rݒpq>s :}tHTy. a)!a`av/Q^!~Z+kHom /!*gRbr叫_>~z" qdퟍj!ZPHnesUD;p09+ p 1UJpjխa"{&CDѩC O@xnmh6m^MϫUTF IW81S6,Syt~`( qo\Ǡowϕxuen=xzI}kӗ8W;M1Jj9i敷)Ӡ D{H 0\DhNE8EHs0/V9bY/ HֶB{+<탊8]횲~*QH5:xL!V91 r̷j XxMZiZ5<1KU.TTnsbdT|:WMѲ^nmbned{YFt9ox%1ed"&N ah/1P5ҫn陥}//܇Qq󱵢E#D2i#Pf\H4W8ɕ{HI$#bx5iۇmK6Va7.Y11~E(³U\YFg_ &AwLp ,;rߕ'gsq*v)1GQ!EBZZB z|s/ZϫJ/'N0Mb, }Mlp\0rAV3J}}⼥("#)agAibMR.Ɉ ۬&"dF9N1P*wxpV[, ,vA!:9}2HJ$%9'2ER U΅J30.mE 3$&NxhThBm6hXyLeoNxfZMŀGd:# P|\Q,L c:L$,}MdeF}.U! A$ 2#.ˏqWGߴCj?|G_@8O!/)$g ӹ;0Lg ɒ%;pRSPA z'{^K;5E _Z4F] *0AP|1SA:,C 41Bv zn,$(L*$ <#:j4ҝs cJ*Sׁ>sY.4tBc\{Enܱ1?~"[Wfb : -oo9qb8keqXg=:VeWů "x=y 9vF~??ZIOLmiTV-긤%EI$)+kCQׁyMrV8 }} Bw//uxo֏ngPUjW}SMﭠZN-uCZyS9eX)Dqu;L ݔs|9N0o?& ^ YƝ!7n+Rso&"߾'\s : j0:z|с}, \Y8%9qXG2ZF#PMkSBhp@S" :' V'6p Sy#rnݲ7jsKisTڡ>vyQw9&#{cp?F:P"4[gAՑ N%jr/9CErY#Tcpz#jxi{4H#4̆ ZKtfI,ԉYj&|vM r^ ~T|~ :)/wT*РPJ/ wIsHZgۑǷ 7Ք J-6y:_2߉bv|;_'/cd z?6ցEɈ臍k,GZCGD ~N2lY=Ђ5TԢֿusV+ o{P.K~-AZrʠ\ ƽ{ځ)XbL"X " -DծET5^XP8(u LYB+nNJPjyhYF!NMH\kJ=!&jiex6Ax4"Cdݍ{)9Mٻ ش=Z:uS6qkhֈ~eL%voç|6VHq0b_ȈUǝrw3L !@.J@*S6`0&(AymȼSѧbt;-Zr{?wij(+#=s4e`k`SI%p(KF3kNUJ}=jvs9)S6^;}5d;!A>L0ň kL eF{7hh5aP /ЁF5 69*cAy&ΥQIl@eA)d C4H' ӞqUokGMXh=4>;cLp9\[ PKȠhD-\2 sxQ<" U i8?$5G9h'OJ= $ba'aWDdnUf5٥8^XszOA4u9%DpQx|H&Q߽I{my I}/p/uZ !&0`^Bg?(3o\+j$!A+O`2Ă'N+X}v2٨$/XSh [RM Y3 +K (HUМ(Ƀ ߋΙNx9W2~P}8;4N9/ oQϮG~cԋl2Mu/ΒKY-dOZ;A1eX䊎1];eJF.ge-ѿ|}nf+aDG3?NSZZS3,/n;?^ٝlD?y8吩#=CXM\a]ؑAZ/NUwpR2?ks~} Yw|(GUd9c<# !wE p0,Lr__G=Wl9Z'q,a*VhF9ٻVd QG~Yܳ\֢\NIA}Շ|(U)&p}lWτ,rgo4.OeT}oO\'>ŗyMRLq;,')/,V81 pCQO,ӧM~t+m`oӁLPII&ӦsOfU6p#fD/!~3Q㏭',x0t+f"pOǺ7 Tgcu 19C$xX!^0}BS37&y"3AzP *E> (@d:Ys>0-NxG$%2X+PqR!Fha7 xVhI&Dei\'j]Ɩ#dZClzG^WY鎡_/0xr GSݗ(=*d9޳-:tb.`Ĺt!AGHCU=t!{WBE+^;b`@ Gh2Xa"I/"Ohs @@c6$LZ{NH]@ b-N*Ks=Y_m2ut:=ps w{oo}"y|~r,{ p32DZWZ{:?P%㖁$K)H@S:*6F}c'Y.U_xs!R1x+2ݶߡ]# bWya}Gg<Nf.sC R&&Qvz "Z̅\N21zt:9εs1I ԭ˞6rcPAKIö#g g}r2MǢk;zq[>EQn+z.3ȁ4LJ_/7cN]:at(+ 倐5%54Z w>b/IYJ{dGYR`p T F rdt lQ3SAEY/KvvQM:묱xvy!ctǿC? {o?VLSEh"Q(nu?ߣkVߢkia5kOG#8ð\p TJK|8}Xr?Or͜1:Zq,cX/kߪH,pbdftd"˭Uf]/GX^>RIW##0{`\!3ɔ.t\w5Z1Ar1}lX=vqáՒw̠2s q" 2~ &|$y^d'T̕b N+g:*֚NÃyiz(unAQhsq买FYrQ3e ) %7C{d0.r9MkB04(it_Y ЊS@O 7V>{sgxeo jw?{އ?F˖+.RVmWw,kiKdHks; +ѸXnB G/*ׁyp=zZY}PD&>m|sJ!,! d3 ,16A4ExO1!C&mPܴB]JFH[I3/v3]<'iOe3րN v"C> JYp gWC,ZuNR0pBNh6*ulZpqEf1:̑c$:BǶ&ΞaB[Mv<[ӑMr1kw84){Vj0`\ZG.S Rr,ڠ䘲JV'jf(I\ (iX0+^~jrxH$RIkKDfC5qvOtrl:M) s+PcV9ڈz07xz YlK۾qF:9m&vT L2*o47k^˝??MŬ >X 7*j%~m&˜^=SU BUX'ߕL*<Yr퍗&&QܪI=ؾHdDy8ٰ)Ik0 Wbug=_3,'ZU9*B0Tcv N'9@F(]T:HNbn Kb2d/#c"\7DB+4G<7+H1H8r[g$^9}̎LCoA:|w#tZG=u_u Juq\[z|W'c/f7VJi܂Jx*H+gTZAk_bއdƒu=uNKPɹ6,=zH8 R'SiPeN>iSTCJ+br,ITYm:xvp& F0BDEe0:wb93We׊Akd=j"}k5Ę:zT+7YWk׻öX{BPO*^ϟa @# &i+`YV(fUrV~_\\\n\14O&!f+ D. +˞wp̅‚\Mp%ΐ?My6ˮ||޿*Z= g)V]`Bۛ.Ġ!Ip. 
04HwVt"kf ﭖowWܮɖW;HϼE 6=h8b:W"J-\y-Ekxŭ2 =R/5//xnoB9h( ~¡P蓴y)B bzE7WqOq˚{JKv%=ԦSJ\=sJB-"tbZu\)Wko^?.y0/뢖tM9c=}?қ~)xxNce 5UReo07G-?QIeN>)XEKal bR>^LmQ4Q?#ő%1HTt Kη \tID 2SN{4\{0 =*Ku+"f5" 9i%RpnF2Θs}6wY~ ɇ='' vc7KTBZs3!lf2Yٻ8r$+> ۼɈa0`v2ƀ,Uewk`lYUrIVKJ)rfK)u[M:*>]TLR ։9ΆṼ^˳izfY 81A̐z4" G+dRQitEQPՑOa<ʽ8oN|e8޼xZ_ PFt%R)WZjؘ'Lf/f[ Rz76ء=CC M7;ܤ>^Nxo !:7;7O CuN=^=PG?r2xz/ynA'ܛH]6}qq+ B & loBbO]w:㭮[h<1]eטן)eyܲ>k ErDZ~`k)Y #U>g-EۙU'O\2'{#7= y2_VvV"?)9ɇ D&?<zyEC}E{Q‹,: uOtQ Waynf-}^8i+'1zϿ|I6;xӿ~uSp꾶V> YQ/G}o粯ɥF1^-nˮ1rjz)P3(]MwB'yT՗Wɪ&R^nR>^)_{^B޻#&Цo#?f\]}6i_1Gi]iSPh zTGTLɏ^i{Hy*Q!yulQ&D9g5[{fߋ=x`92 SO<C'˔@?2ξN.baa0<ǯtwvqˁ.Z3WX9t1R`Ahzu6X;.FTOD뷓ܸVvro'}oq;YuG\8y[5ʙற_Is=u4:ߒ#MZ f؋A?=[,?Y}8+ͥ57<4'>ϙ]nNnU(UNޏޣçT1jE%ځ [!gBsy,4[ڃ!7zOl,)d*P)!Pp2dx.b[ЄIx@[~!R|oj_. "@#Pd;(pՙեZ2RQ=hO&Yy;)}+y4#'B}KNFs/Ep3y!~㬷hF\ )@iptY;+uBp@ Ɓ=V'Xmf3%8bd5`_q :]pZGh4dQѾmK 8#)*JG]-F~TF+639A'ȝ1R7q,Nrܑ"";sa,{Jto<>78ο~n6.ڹ8RLJzM\_K&-LJmMޏCφ_֝)23|Oes-g, XO8Y5$LZ2fsnjNzIft*e7~oL֜ h)Vd[xeu5 %E-]iRr 6n7>H'!5EY!xg2Xѝi7qfBj%9jZo$gI4[E 﫩>)Y?%[K!d55&2&82*R-ߜF1ɃK.arиt+jʢ2RQTqLHL*Cg]&#C LGS*S 4 ]8 mZVdG >⸌M}\0{ Zg]wk?tV w/^h5ŪFLO[i_VY}{"y\8pRx3J1vBb$*ŘZ:eSHYֵ5j;>D#h Sb,^wXZ(:mO*h_zJ5] sLA;8#K- =1pמv;pK2/f?im1C̗E|:AŇwgs~ @-AR+1⊱Hf oX&?U* ,2,Kg@Dsf)&3*]"V/ijoUOyaqzlySV{Ƈ}8ty㥼 NyV< ItSK EC6y,ɞ9d}!(b1Ukk-b4vA!*#GDQbAnO.mQ1%q(JUCPzߋ=\] 7^~q_9(Y*h{ԳeS彔/\6P͠W1&MɎz{J6'Oj2=0UG ~,lU V0[+\s ! Ǿ2a&߾ /z"8C>,cM_1ļZ})w*G)PZ6X[q03&K rHӟoeڬY L-rе"{#:+Jjί'"PJ,ŻjP!R2Sd& P*g UۚuFrv瑾&{BPY.c4hIyP1EcD`Zmʘ\: dy^4R*n:~%SNا)vCPz%ÊZNG/T3]1_ݚv'SiF{^.SLϷʜ\5_r}FQHvo&-ާ}Ζ!ʆfcI"eXs zan&z 5ʬEY0W# KZE" mAbz[n<"Vf ͌Cmal l j o$j϶ؐRīNkϋ.g-6vA/ɔLeQB0Z0HUp?Fb62kٴ>$ P)j Tލ%//Ŝ5;jlgwƻEiZpśLt4@c[b徝!&FA C ¢-AlM.*yS &OM%˪5g)!&p$0N"ZDliE75z,|@KChB|/fTuAE®P+7.ꃶ9pf$-0iAxd8MGtyWSŰV):͒"vlgw'S,Pp EkTzVV~r@Ui\K>7Ila'c{͎PŲ|b5g|i 9^Qύ&0WsC ^Kύ&;;9buy-56`=!-l'e10J6ԦRw1B`#4@z>%Ӣ+^U@J*1T,L`侒ؾO1kOmZ^.ٯ^L@-o:?rzez´͏"}8E?5?LȘoQXUp[5'f!2c-grSi%<i|QLbC z;B >,wmg޶БtڔPRH-ѹ8zoR08r4/[tF@|! Q}%ll"_'nMһnnt]2lKiޮuKhS7?u|F'ney_o4 :cۊQ!T*nƳ_7 ~C;uF)oG7Z|zxMnw~xuuaMbvGR4|tB_kvcrd4;x@|5oi諉toMk:FW47:|?M=sr:8zkUbwFVRu6_V#U0]VռꨁƱ%#_5FRr/Fl JR2 IvaЊ>Hd spxwG &+2%[|sףz5 tj5At&yVps}z]gG[yn5mBvd ѶZ$Aœ4?^୰r9{55*Z8؇\mCghlΚ|E [Fdߋt{wPOYŖp2~_:,6|]OЮ;qG[sR&Be(BYu,~C.(Ug(-Rtn.Rm,cv3i0>Q[@EOlt皃KG@F;go;_ϳOFd6PkzxK0 5X= O߃ok k~'ӆV=0'bZIr&&@$2 E1AK=vFPr|Sf%R.grI?KBcuHF[%UZrL.eSL%:YWjR. rw|+~qo{VWfK7{OK/v-NgcER?TOxk{zxzwPl~Ogaܵ꿯'Y~M{lo\Rm,ozOD^P&/&+y)Y6?,MCo&ˆQZ[wn͙)OF7ใE)H G-5]].&QY~R1ԁN@ן~|a_LC`45BnjaIiɐEV{r5ˁ&W/%!=lR\}peg^\A`c^\5qzp)p+/ /ʽjҒ{pդ4W\}]R]NҬ=\d:m-d- [J߆Mt{{v+mn¦= qz}4"L˳Ehk766 ]_HC\~]Qkܢ;.QqVCo n%oѢ5 LqSV䗀ū>bSFY>#2Z݊q< 8Vnڊ?Oߧ3b3mlߏx} եbW4NDGi'E,2j#j\&ɠ oEYʟ,(GK%j/G%m,+q)ULBxYMT\r*[[2$+A2wq#w9Zb$]"h%T(CZ,lCYUK4]jE6!=_Zށ>eKVڮKUZБR! *-TjjJgK26RXʪrȒubs{I\DȆ1эU)#aЍI՚#IJDq06r}X1'p/VKi}TBȘE `ն5 *(cEWaF>MGk1X\5jY6Nݯ+sBz @lB{.#셧GiX.ϧ!MκJm{bX+@I#dv4 &^dUIZQb%rHT,s 3z[T흨䪗1(!"ܧu}Vһ!0y /%l'P4W<8:еR:CjZ$&GhzR!n]*,Ɠ4RNI;'tHJTa+팬j}#7 Č6Z+&^a 0fU(xGkE"6xe@BR2E Fc}:& H9d#|,5+(_dXkƩЦ ngtgyfD)P,`lMd G-S%ǖ`lUIOX ĎqAPՠ%U3sL)e]\ APjzlᔆYosQ#\C,Z%X@rɱ;(ؒ* TJ}¢\wvHa+܂S嬅.0WK Hyd0c½hAv[ځ@:=,g0!.Yb(37!r% 0.`S^P)P,a`-WR5DH2;[cT +,,p :&|0%[ LB6%")ll@(ee `I* p2-X) T3`pq s8e^a, Yap% Nߺ SHEW&[+\/tְj{PR}YR6ȮLcV BB)%\La8RZ/V +\>$D댉0Odg:cc\9n)Zv)mƬs|-b DXs-*!`ce`; TI|.,K@J=E J @KE405n<&#q'~a"AEaƒcpA&|Z&x^0>XM.**<,hXh= @h NحNMp 3pG[.aA,TW >ON[ŸcQHY̶ )ⲒF"`;/ uʝq'SIR`QE6 ^{@# v)m 4AJ ./룫 U\[*ג@Zc222 )eC ""*#+Y"PnP\U1dH c]8|֔lƚ;L6C:f8MQ#i% vЈPJ9Sܖ޲(EX]{7h[@ ^B +Ӳy L!%ssmZ6ft||2.G2mK]֘kZde*n\Y`$LSfùq0 lEAApu jkyj ΃˪A9 6<('_{fҵ9nA/7Cs>fxsCIxВ.;gV:(R(<XAB`:T^z x""0j@=aQ߰^$*dO+ NHEs8(Pbz9]y9f1<[0baG! 
ZXcxB8#>Q^3g9(+`~?{gܶM ?44.δ]oF"ĭ-N=cƩSEt2 ~56mLUr z} hsjomcܹ3anH }>U) ZUw&S1z ZTժ.̜ZϦ^}e?Ts!)x Sf^Ki9 k =mZWw hPxSàekZif| =pT{!Ѿ?A ALJ^2T(Tpt ]OQ%NC \J(Эkp \ix7!Xw:;JfspA]լLxЬ$.VG2.>C.\]Mn$xJk|U5˟Bu߮h!н3up0.8!n0hAGT/b;[.7UN-2X]LrV-q  s}3v XF]BRp$h84>F2ޭMK+.ރ|D JB t@&F: Cq$PZI!&I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$Ѐ@(%<<$:2[!m~ňô@CLA+Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'YHc!<{Os( 7ʞJ=%(+ $@@{[Ed@:UH3 zkD3 tC. @$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq($|$6%$ t$H& Qfpb&HNq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'8 I Nq@$'lj}z~)Fg jgNow?w4z;ᄄW!J#(% jI&6d\`84ҭVh GDFW]!s )6D]A$@HW|+՚6RzQW g)b^ ""DZ} )b] PWƩ=HWX~q+ /`] PWV*n:++ V1~\`%P2@%NEj\QW5J[Yo 9lVպė$W{Gb|ں=Z=*FQeUi\cE3 MՄց5*ڽ%u=7.oo/گ}:>ݼ黼K_sw4V#2x9^kSi6~lR,g6զDFF̀SUSrJۢXZhZ4s[ro?m~]~پ:oX|Uryw}l{6_ 8ϋWƍLλ}fO˭9:fsdf^uNh_Ϯo28)m9g"T`',`HM55!TbS-zgn 9S^zAt_3*yVrSo\4 |bө6oJjJErzw1?}7}Rz_1]'?.Y ̿ovj+nЮ?>k[RMCt'pqˋġ6\J̘ygYso:YCUuMdg܊o{tT7W(i楩Dɤ+sk'݋\G)=ȏ%'碄 2NyMuKaKB~mE]FoMY8Nio:Uүwd/ŧcW@aO$텅bv2TL!7-.V5ԉRRBډ+XK4tTRZ@ZsZ@L Z%]yLәc' +u&w]!%.f0#ppTt-u<%pe U Jz,]E\>EQ:sPɝǣ+gѫNIw]᚞uDkEO4ttXW-zqdt1D*7]W@`] PW*dt!ѕ\Tt*+4u5@]$֕ IFW]!mP )cd] PW4ՑX:B\ )=jrKIWl5v dځ<\WHYk{K_7|@=WoKB}f. X^]ԋm7f3[@ۏMצobޜM})Nmj@}NgsGm-Gmm9RUmϟ~k?רOBx_8V<`G\7^j^)673_a3bxRlulS9{lkncimU~?-NY=o/z mw64qo a6$XΠ3|ۃ&@2ݥ`Keoudsܶ(8+6sqp j( SnrEBS^DZ"RFË^t{KOHd5rbkRwoi{)4d,Q4҆'0#egEzp,Ѥl"+=_ iRvެPX`c q)6HkbBJSlܞEp-}$(^< i}MF2s r{'+TINuJ+8 2\Z!iF+tu5@]iD{S%KOGW IJe+4u5@]4tZ*AEWHkBB܆YW'ѕ5IJC‘I^jGھ ItË~>ףٶkŢPzK)Wͺ3<`A\7=؉xUv|"C,Ի[E#Bqr{R]^7.vLjwUi^UŤ jVmfV2]e6fJ˩dz̙mjyn,YAmdv4rw5`N<;9pYzi‘L.nDz_=w|=@IB鿂O.6kckA UK{ν/X;iH T]J6X6~~ku1K|)@u; {:5[):u4.-C$vm {Vq͋Fv͔ iٕ~/db_B 5|?T.MtTX 2~,PxW Qf)1j3Tm(Dlk 4͹cl{}M.FV5vcZVG D)|ĝ)mIw7÷NF`7+nW0=Vy฻TvYC%@ƈ)D;z[^;y/9ml(saaQbXCO|Z=htx-[48hU))drbfbWy03M?WC_)fS1nPfEUMm^jNᮮQ^L ؿL6z>]޺]r'CoPK_ƾLy7~02%}HAߛZ6_wO{PTNׁвZGj5z ާv+bߖ |_7l|2hby6Re`+(!|@iâ^ն&rx znϹN |PaL2c7ɮXe pqP0\*Gl #52(BWtZҩu$1g&'1EZj~SiZ<فl2 uCmLђ?ix>"54@yI3GyY-6Y]Yʌ0rWZ?}D%2ayB ÿ?`gTC=, gX9! f`֭Egtà>/%#&4JŌw:^Ë́Ϭc%"~KM}>!:1\clǀ֣s)=`.%k)l-aSbr"X6E,;<t iD8‘ dL`LNAsPG92z Ȣ`;K"V?"D4AQnh N 7'_[Y;_^Aeڗqn9-J#b+ݙc?*ڼ}?, *v[{ 8Bg֜F q`b*סN n]_TR//:)HĨƎgiY(qdUU-D8G?` ~*VQeU2|JΨLa"{+3{FE'~F,(8pG"hK냉KM-ah+Cʘt3rv Nq>r&Ղ.cEgЍ.̼*Xza~GM%|iaBG gaG n$3e `nSo4*kc&Pk[-#*$;A Uttz9%x-zrv Ŷ+/Vŧ(= ~ѳ|.ϒHsH:?\ZIzxZWYq]@gݿk_Yc8k3 XEg\V>'Œ )2Θ13AkgJ#䏡 1uء0&-׌=R(l/ZYDrAO*j(hRґcO$71CaL |eEZw7ٯjg>n?ϣaXb^{fby iqxFehe8^!ov^Fՠ7a1mYv$Jvw^y9<"|k&K>ʽd"}.zb v8Okebg3K0j#]@1T)8B^it#8K ]7璁{I. |o[jm~?Oy I +lf5Pr`I;dPNɽW9怯[_ V[ Zchv i%4UNfÊ̌`^]Qλl%7"JQXfX߭p2}XXp{et?j֑ug5J *0 Kp:ʃfCEhazm-tHu~A!mxVgNfJhm{I⬀N_v"#y&jGu_r>ۃQ4"*ЏEBb漎AdU F~c%et;c%8IgP/ ZQeNgE+qFqbxϙ x$ )[X1c2b=6MVHKD3rv|QeRIDȬ(Sy!QQA!*-46F19QR9^9.)6!J)TPCREH@K#rsvHcLL)>[)r jglp>-QRL@}K%b:cL%յf쌜횱;ҙ.3Յc](QuaMQlqKծfYk{zuq]'z 3hX^ƶcK}sk Q|&wmmHWԽe $:zgve#vg7a1,jH=5?_")SxSŢ E&aTVp dmH #*C_4Y|/Z cOJV63lN+ \gJEDKCF enbEmv`x& B!l'J%VRq;xiXF%p̞Ζ&eʊquJzT*]y3g7AEhq\Dt#kq+=3M v3 8F oud"CcE9iVxG"#)qFcNT8R%&iu9V`9UCM fnD}WrrisQL\9,.hpq눏*kp,jwz OE,I\2Pr4\<.cgծki~?k^??g5>b!>; e?*imHLlD(Μ%[ Ҍ]Z(BiFeOpB4` Bw;*ndQJ=N]$RSnr/XΗ `ign/WgVa)m"@(G1^;֪-7:P".s1O%Cʹ첄U.`pbabTfj>ի"\z<Ξki[:?gSch!aاNf\yN"NNH9g CxĽkv55_oN3݃-ۺ \Ӹz$j.JQt#Em_+4У4#44}42!,C$#R~HR R9 (CNX МcM=q/Yw 쎮]ȭͧCI_]^? 
b>f62nqknz{8;:oC"2l}ܲ㖫dxΗG{rxM[[}=@|@Le;:N>\ˋ z\COwmm^2n^~Pɼ>rkbsI+6_gf?"V6Ղ~XVЉzՁ(y˱bu8Q <BR?ǻY𱻙Mtܓc!s9 t24EP@.ZRHӠLA1$,) y_;kSQvt#vtS}X/C\*B V 14/m> )ct*NQ*xw.䄠IEOiq4p:~GȲ\ Y9-9QKy40e::^wA+12z!9 ;M/Eo V"$*"IfbT8$p;MdP{ܠ'lB&6d@I )ʠWi[SԪx>-'39nT22۰+|}׭8].=8Q/>c`||0Q ȱc lA"B`ISҫhMށ&{{ s1&;N>/A7FkΈ+ J`sy4 WBرs%T+]JX5}C tg/n Ic(D.d]0@ǴO*)` qTgbik r 0Zx%!XX'6g{JW{/|ڇXkcf)Z闏bZ;̝(Lײg=w/>vƨ:;:uXE&ߜ#U+`̕q'Om8^]Y&֝3/ֶTAlr0a.@s)HyIʰ~2Nl4ge~0xsjJ[xޮ'n}?\+?]$/VAjx6(\V9d @/drCC B_6`hr`=R#| ҈ ]դ֦;#|b8kSˊ$+:0&#B7o7Wz7gO>ߣzzQ@T[_lQ-^^e<xB݋N D)k.Fat &YeJ(. g߀gnߧ=!ϫC~~V| bH3;9RP0"텔Q&b :mCA>^qf# dPXR1SqE=tDko*Rŋ674q$] sv_cǡ:C:): \Qjo@ ^'.%+qX\1B$QT&SQ)H*iRr>3 M h2JOkO9Z9V7l:]tc^1O4uz= SO6`G'E|o4M4f_m] {{v:ԧo77}RܲzSG곙ũv-;C7k=da GV&7yv9M5OBfIWvvyq쫾zd:dZ&|6e6%}Z;..rfKj>5^w C0I&`/]ƪXg몭 jBRLf_n2(wooǛކq 8'!M>l6~-le{$Ie:'g~ހ9ewOz'nPZ3ω>/G|rKm߭'{7vhrY^i Wg?>q|oI]ncvtzl2Mtlq<tZA/NUo˃&,["};] := lҒzEμņ}g&7y=ֵa&KT_O{2n8x W#h{6ݳYg?XTݾ|fFhgӊSD<3Sb8ҕhBV.ΐ&j{/mG+^_@fs|*?cF Geoȡ>:$"Ee"md9L/ :cͰ߈~C76ޛg*{ eϪ/=VYDo"P%ӏZij`H`π~q9}zxϾ g70@ytȕeS" Dc=Vy4<ڌ~ oeo}oz[tRNJӭmM6ڦ[tknmӭ=fZD;qX|z=lB-iNlBM A6!ȯ;vׄ [SlBM A6!ȦwޚdlЀ A6!Ȧք dlBM!OlBM A6!&ل dlBM A6!&ل dlBM A6!&ل dlBM A6!&ل dlBM A6!&y5$u5p)Ws(Uع$ȥHZtkCӭmM6ڦ[tkn8ktUWHm6i҅Ut#QdO9fjnqebفN/n1D~E3p==&_%3? z>aI7$nz)˖:~Iޙ=;*P 18NB`%,$ѩ8l1J%/}/!'M .zJC㧑̇Gc=b ™}@CB0Z0_tn[#֐n3҅xхE) |9!1~)ǽO-}edzO,[6G|ggm;]RT./Ơ5ld2ł9ebLFօAΣ(.y" x dLR3@qA`P%Bofn+^<絡髉ɇ!o%-MS7[+S}*vhe.ӗx<|湾edG@@$5I9 B)Ni, JLTQ"%WF$Z*H<+"Ox*c6)+t1,ɉϕ3~살2RqEġ-jH"%Kp<Bi&_|m B7y+Ո%- ϖ((AR,SGڂ}TND&i}psgϫ~`tOױͱQF. URn`tZH ^9LCeU)S2<@"9696˱z}թ mm-Am;Dہ`I狴:3j.%«Vg~NLBslY GJ#OUcR;yET1B\BmL2W~(`W-QD! `䠻"Pyv bR4DA:b e ];9=KFB) W?wmbqU|ٝ,pA-D<b%Yiǭ$Yd=>Ӫ\zqWOmDΊz. >zLSĽͺpMK̂|g夿z"Y6K,BPy8&V*fO$Tɸaƛ% $R"(QhbxLVxKAl=GYL%p'bR+w?^d=}tG"gM>ҌfcTpޡ9Xg(޼BǢ3.r9An2{Uru%@j@Il0992(p4r`9Q7»*dH&lRO ^pC@Af3L\ܲ+ʆZpiO4XrHzmV6&H+q'ax[䌯JY7ĕ԰FC]4uKԯvr(26JCpL!jp+VXe]u.vO5-FIjQlhQz0/^Nq=cכ3a++an8Wٍ͑x2~V&[ (5-45#1lϩOjD#8d<ps4ZkuɦV%g.[ȴ!uJbRNB$}Jb4^ >+;JįʆA^:^ߟ{~-뿼~ъY (\4X5?=iV޲i0xDӊŀ.v?> bj'Oe 9sp*'"+$6MypYZ*kz4MMo:}x3:y2XZmb%#'y>R,'7^d+,HfKL3NelM'Ny>FSϥd MXwzŕ (g2{o=lgR]iab[IҬh#Bm3W謪*)w]Ac#J/Lp2"u hSF:{$ EcM; k$ %3bVz (9|M@f cpлP*: 1ar?iP(4̨\Y ?ЊS @.o Vpozwxa܀h~=}htr|[Xpcb|3!- _ i(aʍh\]h,`Z: 1@;/*_h{kIOzK`Qcڿ5a (`gF-Xt[/ tC:dLߞ VKwgi8]jJy{ 2nZjJ}}vٷ"<)2\nTߝVٿϺ"aBBTU\ !doأI͉FQsb79(` D&-(gybH'oΙNɥdoM)zcp[,F3YpVzP-9}TDuԞMʗdJ .**˝36DP%'&H ܥDMȴ%+^̒ <$ Q }̄'jkFK ?n?u%5=91`ƳOu `Co'J:9#[U.MK]َjZc!yT\^x@kIh [ &#%ҼvC9=3"`b.{F꘭ѥhr&9MPs.j2& [*4T[[Oj W 2~[Úpx4,@w@Fɇp:-{YE.s(DJRdJ Lb3L8g!k94 sQ0^(fT U9#Qg혱YMhU)Mۧmsb8nmiǩV۴lMo{WC3qReRB;F <.ȋM^ԭ,vɑ}m5q[׉^'8K{iɉvyvYo{WH6-N |ώZD甝X%.>]=lK;bOd{b ]tn5(BsF:bp\LяZȍ mE?Nޏ &u3=z7NUI@B tǢ@HGp]ArBsUb`} Bx \ P D g>& (yOx)[4xbZyAl< M[d݃Dv/I$n D ôfjpߏ,V;s.(?{rXSA/#O5s2q-s^%ΓO ړ(UAZ\cbD2d/%c"lD4G<מKL1 m_skMÓxf T L2;60e6Ňl:7~zP-ϣg=u_t JTmv\[z|W3JBI4gk\P',R '|I(g`[K- XnG6Z%W6B`Zb@'o2ٵБ;H]wesMEвg\u Ki>tud:QZ©M2#VWjSɜ\<ÕĒ%>ƔI'A$pQ3BbFx}PH[z>.w,:ˮb=Pb>.+tޥ:Zu _u^7񚙴tEռi8\ Ѭ+\^D=}Y‡+_QFp7ot1:\T~dnF$]ˢ&mԛj ՛SCnY3ڕo!Rlϲ=/7[u0}Ap`? ~wټhyE/8éL/Q_ˌf(ۧ *=gX\"%> L gdH`'^5TLUDUR\@se \>sE .\iv\)1WW/F=?z|}jd'2WWO <'sEgcƜQ"%Ez!%?)1?'?u_RfhTzL0BvoiP6Nc0߿?TaSIe\nz~}J c7kkܝUn3tIVg%n\(][ Tcua8к8e’)567l 7nM#zn*A O+=fޓ*.ԮhرpaQ-;4Fd" 2$k)}°4n*Ӵ_ ^iyh͕RW:&tFLjz@MpB2]k\y O܃Aɬ*pH̜q-2="R3оޥ]x5i_Eݠ5>.EDv:D!?gz]Fl<ߥY*MT*V8A1#3X2d]}P9ϛ;!HUEzr7NT o9ѐJJ-At@r=ʜ%x u}d savnE);IM -ڒBGZGWH }fN^Dڅ6G;'Xr!xQ+QЗֶڔ^*QT,tRϚ.E#OAQ ׄOQdO])i0f;Dp+ ]+(I׆R ӌT"yˌBi. rNg'`b |ZCzmV7k(QLnB!VˮRWHme±&6V:wD$l0]? VX4oq ocs':[`>n!\"U0VjVa   ʄFGxdp .qNs5`AϫuJ+j+1w\ 7CA a ȸALAAXw[@( `5ePDdBEA[(2#χPʨ[sV ,,x:M;"q b004KPI!0ά :P%@ `->AAΛ:2]:Xi65lD]`#)#ͬ=kyGD)J6NB+mebEI]VkD5=() ElD 1r 9 VDlZ"5ȲS"ZJ( er1! 
aUh#ǻ=sAv ԙy?/'t7hz1#.EV̺HN**&6Ok$% !pB6}fC<]gⅻnod*ՂYe mCka&a-A7 /ፍPtdUt4CҕjIJ2Ba1%OpHv9k=/3+XqAyjNKDNW2^i C.kflq)TNX)j^ /h=V =liV@$޲[6RSң*x/GT^`!-AmU($`F}D,a* z0dkR@n㠝{njt~Ǵvǹ*4]SW5 Bztqt*ih$ti#ll% f='|v%[Y:Zm5EZSRP'ՓFCoׄ bL%Ѱ-g' <%2`Crh[SP.O(7"fh8(uR"˕,.TP=`ePˌ`*1#K[Ƞ 2]`-yÖ V^/w`E^QH"NdviP'7 B`~?"oQ^1bp0) ,*1Hmr3u#&ݝ RdMKt/J3}}>l:v<z=>s=OUEC9Œ5v=h\(]d'!:J@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; tN 5#9lR\-q헎@@; & N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@:`'P@ @}KvfsQ8ֹ;2@JX#,; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@RR9ҝ@+Xr\FqrN B); S@b'; N v@b'; N v@b'; N v@b'; N v@b'; N v@b'; Nq}>:UW[>i{}\_ݥzy~U6ίa6=q G2.n3-޸0:GCuD0]3jF"zK+Bҕ1 +f"ΏBW6ƥӕ!GC+kb `ơ+9hK+B>@?\Ѹk1 2׼eZ"qS~%>Y}8|z~ݱ"CY {bЫ z_auӊA]J G]qк2 ++[Ur? CW7QPztut嬑1x^DzynsNAd@l!?ʇѵݜ}xnLK?^A{n/8Z? N_ol=1f?oK ~8zJ}c>t{f#FmG ut|@pVj]n ]j' 9zu8 oN/>I'H7|^"pxFo_hBtǼu_6ӝާ .ޝ^Qm`~2N=ڇݫwދc9]a?6tet|M[?woͶ_O\Sf=|<{7߬f|~L~{cw9~-fїS՘(ǹhDö!Kuk8 Q.'L*]9LhQvLF\An;ydǹjgT}^5wwcut?ld'c ;!jE\ ]"ɹ`RVYm1&PBr騝&1Y\(֊JyUWEr%0 +)O9Qnɍvv{tFi(4 ^,Ti:@|"8tEpK+Bx'!TJ]љv[l "q>:aig0]=# \{_~j/Jp1ZB>Pꅝqb+tԡFc+0t%BBWnLWHWp>Pf"/vZ+NWtutR ==?tqIa_NO9]‹o6m?Ok uyW?C1mmd{\m>uYӫј]l+y{FG/>=cj޵6r#" mQ|Ev p $Iv 'bweWk,N7EU_UWǿN&oO6NPDONt!!%r!&veϏc;HD,l(@?#; & .2K6(MϫF#ߦ5NiϺUQNl?Mj[2~S~4Ia;]?FHWvNlo ]>,GW|4J{^o"lp~1[n+ g5{Yl] Q+qݾ5&|KZ@YZԦjrvz*M\idg[JJd\8PRH":+WRYSjI|oJ0JNiTa>NXEW. ikeDLa@eHYr0,8 Bg"H "y+td&1k}ar ?Ҭo5h|h ޔ9ygsYw؂ l;[\j<&~!ѵPޤ;l\xKpsr r6^4?^Z aޗ*նvl[extgea僌bFzx h iaL>œ9)jFP{Fp+ Jh1Dّ4.H13}a?rGpT /SVρk(όFZɤd4Hʂs*n K̐c"CcP$.pQ(zV sb inU|_T[;nwSDwv_wg|MN WCC(PԷe7 H ?ȞRJpntM4sC/jj~cYrB[gIKuH{@חrK~=M&~cKxent8*KH^ NFXxRF;DSY+>A+Y~ONps"C~ѻdf{q>XƏI]ijKHl7ґr(zWCʭ51dY/[F?EWNAS5Ҽi48T= ޔY:M`b:D1_&ySef-i%撞y |1n xL])AX~Xs_Vl0HkZ҈8x9"]]R7moM]UeMC3-G:9|S~.K$-',+NI^Y]c> H|5M-mKTRHl -p'ۮ1&ۂ#lalw`c@ȥ<7eg|K ] s?~Fzz?VҼB!,75a+6S1[^˖_34y +au4=PأQgÁtVӋl^n׫HHAa{;Imjߜ,gX^yJ/.oISGk6td Ⱍ\ͿlV:fv87%*/ 5帋~8&~43k'`[%i}]U1[ 3πRKʾjLޜ6WzH=ZAUPͱr -i{҉\EuL=S=c}pOY}?Sq")cJKL%edQ%ɐqE8Go^8FN6qtAH CdV{\N P3퍜z s\@l:1o ѾP!c2hջA&A9«J20lu/xdψ[wBuG.J[Ϋ_qjxzk&q w d\䕳sm.ˀۥͲM2aUgdl}([VO? 
I%*˴ yY)|ibJNѠb<j5w ,gֿY@ۯ($mfc<@ ) Lp'-FZ:ct"xTΫU㏟=A|>_烎Wd; X0$h)DĜ֖{u&'mۗ d0m&/^V;-(vقo 7 +_QCDT稡By5'B ǯ!Ihc t2=}R6@4ٸX)u>=c*yk+U*{sǼ?0 Fp 212Y  Ө2s/Q$~{Y‰#1<ː HYX"c MyBBl͜쎌 VCkk|ZE ۖ/lnunGcesfnUO;W .<*9#NP(Ϫ%Fs a'U0whiNRœhF/̺󓠞pgTAJ&Ã^%3 yZ ]o]ȲCEf ' x$t1%[GYJU>=CȹՌ겮x9Jftක Eɢ)!ۇ \TwCt1H$RD`%'@cw9q%t vdGmQY I*驁 EzK&C9ÉA81e7rz%p$%W*E*sj(靕&szXcN\@gD0tؤ8.T څP˂c -1OmsVfBBf5TA'ԽtXT>Yjƨf7xڍ\b"cd@%`b]cL*\Av&$K(c$cK ¡"?EWgX.U;AGw `S$st],Ջ93s]2cɁ*r6Q*#R1/gUOob?ͦe^("vObUmMq/1m1m@*,6AzY% QWGU )ql"Uu:=O8ϰbpP%㖁$K)HH"ʣE :D9emqy+e60[MEvcY ϥ5S@ctĄ- &CG)y!=Lhr{"1K.zHl.E̙L4At+p@mdlM-fiCд'¥DoӋbIE<{؜$}cZy?+Phy4}˘u;'P$RR"{iA@` 4+ exv@P= ) lJm&of69̂O#nq\q1;miCQmQ`7jrtbg%JZHZ;5cd$2og 2@^tĂI`жLIG8$kMq;mCѶG7ith z\| \ ]&z[R^Wfs7p'w%Ap}3E?&984V-ջqJ5@q!J&g+ Lgbdtz|O;{v]AFMR0:# eK$քDF.(e_8M{oC ѽM׼+5 ZtE byRA'%[XK cGI;xe:QR N%s44AF *X4?+h"$-UݛXqFw8ͬɺȥ +2%ՌHlWn/*sϢ/KPdl  NEdДi̎+fRM:xxgǤ!0:|˿|C.~ 8@ẒEףPhޣiVޢiEӚoӮlkڽ9}\gv n38˛q|7gՋP怶 _UzqRI b&K?!B $S+)BrɈp\+>P$LcR7W5ܑ[f2s#1$LYH>'G/V*F/5L3:M/κrW5x]6 >\1';Y Kƛi_|6[IҴh#ܤ16JJ+pVW\sW1_{C&H׷ J=<.*6WߪHo}\zw-m~~mv׭4;'9wo[T\8t: }8x$zHx/n{&4TW|l u\CY6[?o974xAKטLwU ZU!TlXcӖ4\טw3$G@Y^5f#ga.Ǩ΅Riɠb(=Ϳl,W2DO Jx>䥽kK41w's\$[ ҤW) P"d1*g*c`9l $<"8 eps4hXvE<ڱ5KioR=I;WւL^"]+_UyIJcmWWORo(-&9gVGDU␫'Ty-yp o.wuYb=v nKT!f"8˓6BL4s qhd[՛8iׯI߀cj߽b<W2"ҟ͝&&9= c0@{d[9'Ue{Y5=W],dgӕhل7?>?y+&^ڦ+_yudYe@t@?>?6oD2K{Byk!76gҪۇdr@o.+Z4_6e|!޹=񓰒c1PآM^N>@ymB5cM`L 0Wi༈R0ڒq".1ὤ~`o/RS.ҼuKIvP@2?u.xujQ6e>ͅ|@=ZMrVx|%4\TfMn3_~C%}'Z٩|g5YgF]e)nxHm$]K$e6M#@7xD0,j'ERD.Z-'9D֊;TekTgr<"m󈬁%_I umw'ceZ40\ŒԖ;%'19H |#Ġ.el7{df`V{idj̊br"H4#PFo DmM'j:*_#BN9$;\Fqܟ /vΫk!s֤gKIk6J [*LR@BVQc1fAPѥ9&F 62&؞B2  PM/eH[b_0VrhXOw!r/c֑+@M>HI쥱)̑\&;g!kWC@P= ) lJm!a1YM`u)`enMgzȑ_m^d1>`.-v<ݕtjcVŻ, $!EːU.>AED+%VR1%/Na1օH`Pɱ$U0Y_U2!bHLɅб=l8aKk1ɰ'cC-""qKH$4[1I%8vq,|J{IɁvdvDo{U" >3N bq) R5ހ/B ZUI@oaOv%b~v~tFs7EW׷9 %4_ +My?n\#jHg-@j5c8RC09$ҢB+ɲpDBFKm1iiK@^UCbE !/ۄL2Ta,T埍gbd.,/ wQ|sM|J^ FV;-E{e9O|Ts6Dtwk7Ce~iɩwnA7Γ9Ë(9T(Ӻ3i L"S%#R uv%;,Lt/nΦ36gtŵh`*Gk[M`\Qz5|6LE%Ul˾]!SG\֌wvիGgZWO>7q>;yF&5j4-sowhq`~{Z_-̯^zau``0 `.×\ Ϭ392G^JYVS{A?z'0tͺa4gb]ǣDOo^gn^^!7YRz}ZjHذu8ѻ_嗟o~zw?T~旟xR-eqo0]_ѵkoٵFأk+|ńӯ7P}]YTW n@ZzQp7=-k&3a\\ /f5\=N&Q1xy4=^ͪMpB.FC+; 1R!M Q5& ^etP!}SL7?Y!1 aa@ f49d ,&dX"3Rҝkchj ?]q@&)dSQ1:uBs;]V􁕅6>o,Wo^i<^\X$Qbg@i'Z.-y|3A# e.V :MP6ix}I jD[fj} A,TZx- H=BC("&':1F%b]2 Y0:;@tԺ}Pxa)Y:&aV`8R )i &QL)y6Q2y*l)!)Qk#) s3 \pVAN5GW!&Ek$DNe Ys]~%`1G-&(5#AiN5iRI2JI%&NBJ** !*Xє4s)xnE]%|FgԜa[f۲=o=ƜB=bc24E`ggd^ h 4 T0ӟR~`^Wһz^;]vGؾ]S?_0߮ KiRnۣA*6-Ԣ<|hQqAJ}2M I(M%YM432TߕsCz~E3XmHc&h!_k8Z?AKG-sЌstUJr x D!K%P*]'t5E=KO#Hs >TE]Le(1xEյUgjQB~U=46q8 ?v1OomC;hp?f:˶JN7zȱXfw4Xoz|5kیk{tqmAnt>R\۶7][3MŔr|ww_nwoos4ņql ,ufMooڝa2oioyu~3#sEh8ݣ"GZ{-+\>}]F7T=O2:-ЗAoaqaF:si+Q3k Wy~ynRh : 鑪/F/dl22wHz~,'NɑtM=v@hQ#Uh,?țo'̭3so|7b IH9%bJ*#XozGIh5~wmKUJdT_@~&,p'W2|/OáDI4es5]_WuW}*Ozd0m?2uN}(5\2Ns4BXdf5/੕~ܩSf+e3cw&{7ҹXt`tUVcA|@' N%IiFgaX].D/9e#R'7V] j9ߘ.E_|cEJW7LV niAwQDģ1Hpo+k2VIqG77)z }f;b?wN2AGOd%i4YL^sT)R8Y (ZO1E:%e:ѲKYsZEOY㧥~\ǫWgjbLk}B3G.`s7+"iID` ) Nm"\~'6 睲 .{V2)uE\$lFcI$G_vz273\K%tvv cM>YBIRLxw: Ӓ#w. eMR4R8 LqV!H ;?l]!9$2r8^c5hR:vWf:y:AbpY l8Ξ9N^(#xH>;_EbT1ČD=`2(QlA;>)zߴh]. 
b mj/kn ,[K]xQMЅ q\٬,lA~|2j0YfN$hzåE!{?0Vw#t9lLf,ZSB'E +崶༈RlDheBurhl bYBLŒ5|C]I,%䯑{3uz9e1o)סMZ XQwt2#冿u4uW].m=0^:rsni.5$lҐBl*9Z?>,!*]q\2Vͳw@=I|4:n]tr; ׍h%ƿ_[B }&-q~ 뒓UWIWd]VrJA5t zUGӧZE#!Ru=zŵ "B= ӏoI(]}OCԇY#P8$mmnfct_/eU8n4ݴq[Ν I؊C74?m3=Ʊq{7Ipo[X:xUKz5lD}ީ;WwqQ567n,6-/fF>őEhW-TwW pu=YRm67Y<첦i کEo{n۴X b4w>ce{ϝnWvm]lSnï0.6/g6g]pIƁ4V+Tv ˍ͛뻸_;Oewc"̀$ȩQjA&Q(29q%ta756iZ-Dt "i%HOxg%UVI$2}4sDc]L GSRW2v\4ӘOξNQh<BEiIPZBhVт#I|6I^x+Lه>b>Ef5EDFnGU%zUO:֫9Fph 0XNNp!$S>&ȯFM$Kc=ұqD_-?uG(=dy˳*{6Mg4u'z@f^OblsU2Q 2'+JXQ,ǁH猿όϽy,G.l927Q`BKhX.W6Ndb< ƜKw+""8.DK7uT,pђYxo00>L%!tu$!NyVo}*~?돧wiLg21+uʺ21?pAKLP|WMEg,u< e%gDPuWZp'^dU2nHmJ}I>=XHr.-٬6~[o_Y7&-^R{CuX󋆴~ۏ7?]b( >Fzb:8$ #Xt0#8z:!q$Puٓasr!TґR\!(]]Y/@yn;gv5x(7g[aj/q-fGZZ ^Rj+\z.FqBpEONHZˠpU|WWBjf  UWS"}"%k+IrLѕiIkߔpЋ$qvF1(tRʚsReo\ѝ=wr&Rws)@--y WoISK!ϏLFCKr2 _J2y)燺Pe(7#<3fތ?ӫus~Gw{g5y$lĬT}{?>nQ^3/wLx(.o^x.0$=M Sĵ'Br.n)̂h(iW;KEYT[\\LwYWMXYM6dxH1W`,2@&)  (3z=J~$@+QX vB)" "V2xNZqgpTaMJ.%z1K9\d#eeP:T^/t띉cFQ0;ÂٻF _G1lA* uq|5r,xJɓ` MKܥlSEM sM1$U /ɉC"(fCNJڙ85uBW^]}KaI_ 7_۠:|ќAS7mm[k-Ef˩R9*oOC2yu\(=8(%hͶ)XR:|b1C7^nS59="`c\*3z]6g3j8W2Tؙ8;@wft͌Ctz£O-{9\,6uvYhҬ׿ ~/ d!r/c֑+|&K )ЬPNy:fݲ2(^HQ`Sj4x &o,fu)`l׬[]tW\ޠvWPԶP{`7x@lbڂJZh; w[#F0/&A^4ay8L!L&&HE%}|*@Nu2kَS,Y78cDD46y#9) QW|4rtv ZPe=h"IJeC)"i'cֆh@3>z2HNlI 0Wr‡t&vDrh=jO{͒qxl7 (INml'ώ̌{sNGE`01p7x Xv<}ȭ'q ~ʼiuł&m{@.MIGǖ`&,MRU<B5ĉ ґ9drXr*bp,cqF11$q܁ɈL 1h!GM+,@Ȝi`s=m#,F- gJ狴NItmgy27,co4 [&(,xS"K3oz}"ԛRc?JY<{`gqpYQ-gZ/GH θd욝-%MΦ7VgQ* %Je3.*!ݍw4o>,2F7Xr9v1Ş\O/]j.Q.7CooU@5M: ?9V[e?>qQ!&aTṫڈ{h[Q2{SV٣W*r_<_fV]9ƣ?%>:G·akj dcKn骩 67**i4fbǣbm77edmnu1ȦVl ) +Ǔ:is(>?ѿW *$U6O>o?P~ox }|[qD3pm g]0_݅_E5w??iVޢiCӚ9Үls >^>Ro5[/ )?~(m0 vna=G_n2E2L{a^Eݫ"pkC.H3:/^2Xd[}}:ȏֱij8?(Gs '=0$*L2Qc)-w@Z1A%8K6,T^U?Nă%oq: @%G1$H'-d8'G/V*F/5N;it}*7Ğ7m8{/57;'ofS.%5(cQG<7F 5y e,-_/}0:ާ^z/ҨwCvjr&ւ݇}ӫ.GϣDSԱf{JGXR%?j;z(ye%|[ XpF[#ͶCZGBZCZHشrCB0 V4A `%%A,+HX^v+ \IE> }W}lKH!a&@K "ExɄ+'`LȐdPR)R K%&k3) Y X[D Yxo00vwk;ۏj:P{hƁWS!U<4uȾ5-6< vXG2( %uX1jPݵ% e魀SOBYz)e;LB jaվ+tN&-9 3YXDsj} N$TZwJ3D\Q%bT+Qd2$JOW2DO z;Zڴsj=89\G`5֤%R !DcTUr!,WyDqhX^O1GYi1sr#FĠ>cyU[sȩ}}&О*:%lbq#"(7^4Agr@d^uef'iۿD4gPy@<^0 P9^%*$[X4$07 X^P;Cz 8%Mb R&L,O1њΙ+2"XƵ֢9v8 2{ )#jY1jfy^=?7UA@=A C,dVR° 8v49.IeY9eAVɢ\@zR^K:׌nU.tAdP.zC y^q_μe6(pPKI,ꖹM)zj{XH\d5.{gkE ugGyr{*n&wa`c 4vc=.ZC޲/η-Uwݫn/#}'7vm:ꨮ]B.^AC.QJWT)]Jos4R̖y6 dE%^}T}}pvyfy:~('Nɑtoy oR^픐|!RT*=RsJ8y/sqjJȔb³l,i;@\Se62Rk"@{)vxrs;Өz:i@8?.|(x@g5 Y:yQX_,ʬ;_XbT1l3g=eP毭 "`ݪom՟daNd׋uZ4]Gmz(j .BɖRJ,yUЃy\o\KamdYet  #V2s+A7\ Ի'Utki=ю7_l^z%27MtB?*MxrWlSfFk6-pfǣ 7Ù1 C}ariAAm2IW[7ljGDn>hwib_fPHL]H; S6_7SJꖻU`>.s]oGWpH~0ۍq~u"D R38̣꯫ޮmme'PY$6Gb6ŋFO`"n]WE"~ouQM8M&?"Vy>|iʬ()>_~*`ٚw*s=DdO7kbgTegYirlhU<UI bReI|2O̲eY7˚@4=7zn7{xn7g[qI`LYpǔ5qEj ah/1AzUҽpGeCE 5l 07oM3, o 'IT2#BI+EJ#Ҟ!$.~ga>jM]]ݍƆm܂vo5ؾo@YמsUOʼc(IN#픦_ߞz}4MqQypAvE]dNfOˇd^>a$@ccaH)$(o_>B3U52kǡ[SRAM͕8gܠ:rrmPq7T#ݶ D'^XwKC4zGVQm"rK2A`kV!|@iâ^ $"$ĦzaR[ERc !xP'a 18Okn_ èCzd1uu'-ooO[}:m+9?q^#yA7y!idnz1gpcY+^Eh'̈́ϭK$79D>)D6yl^X!]Qi*Zds8#I(VHC@[R# Bn+B9:8MHۨMȰ, Ƹ#aܑX+?"D465K`|DH,O9ƿͦ i;y\.(|*TL]qK[9%=SzKv o^7v?ϋM5\M_̉|v|-0κԾpZPrmi9@[lp@8+ 8+=FjPu^W\(N isգ _eKȫfy&VZS^ kU{x}YY`w4/?캸 $| {wY%`QEͤ9gzjVb2d1]AMY\շ:^5 se4s6ԝUէeԋ\cstʠȪO4Be"kZ#aUV)c`Gc v0=7^youa(bTnON!r:MOZNf,s_ToycFȧhai{XäNH:t f7Ĵ`pĜ$T^d=m%<y,![nu '6,:H|=>Vr.`9ϧףxKn]W{ɱ5Qs-9˝R 3ՈgޫUů| I4"sڋ!bMHOKs[+ ҜQ0*C4eQtTkр3':y˦CASCLqwn\ڙ>,&\;] ZJs%O(;a1 LXLBYJjBz:RR%CA .]VƁ=] ]i"]%; HuZzJ(yo R[v:CZDXc@ -=j7k+]zR]`*dg*ը+tcv*tutEXv`;CW ]+@K0m;]%Ttut@f]+:ٴU+th1Am+@N=]]qL+iwwLhERNfv`):CW f]+ԊY*$1xtE E'#2`hkBjJࠀ]k$Hgh:1-F4P>EVl.&;K LtPmQOW߅4U,CW nw84>,$CWzˮg".Ϳ`qSv{콒GZb lۊށtOWv=T`."3tUB+[OW# !\uGvC/Rtut)U]2 mv\BW m+@ۖ񠧫BWM+0 
]%3`Bt*+oAxNUWtFJh\x'Iҕtme5T4UL;w|]dX?#HvYD$;I/ղޮd~کgZgbfN֢O-2c c}7: D1,}ڴU>}0}:(Ln"pmb#Xs5Aqe16yp0&\n96%6c:,K-ieoY\LĆd(Nj֦!rǜbna{[y?~$rP:߻1lF4"},,X2QWvگ+/\և޻VŸ0 %A1'$ "$Elq,(S> ҁblxrxD*4C]7 0(҅] (R0!S&CҎYN(֘Yy fG"K>56kɦKA KK`^-)j7W1Lܷ{$|+1LFuJޛŪlB2t9L.3[r%Aq >3n*b $8&5rΑbN"8q[幑`CctpFJam1䄥 ^*m4U aU8Rn+帓F% u=',m,L딍X:32X'JSND"4~<`7 ӯ!6NF-a4zv1߽_=59Bpouzy*{Z=?G-AhS$`e^3, Wh3׈ 9yZxS0' KRGVS)f}vu= UM^:EW?=jvԭo>Z-|u]LLn ?6WM[Lih}L.rgKVZ·>׳շ`do>e=4t1_@0g-= FT|4[/ fa~F `RX?/A*o0nh5,j:g3>eNar>`9 x?pQ׺0`Q4TԟCX4<~{r|{fa(\4azrgG]l[tB=#FdGZoEm%쟥, A~Z\޳O"wC󴃲 1`k  ;ĬW R6HF7g)Oc,*T7cUԸ:mjCT>w{EX[;s hg/8sI ӞteoC#<k꺾_ƗqZ{@#g(J= n&]yƬ[|rgL=7 \L.510GxtO5e6 _~0bۥROWU<>0VSiJIdZ.mR!֝#\FЪ>è@Rg<&T7_ X+X5RB\(wp)BL@ƌƁZ0cĜE NVFߛ d5l9o})%fooaZ5ۺQ [GLA}5U_9at8r" 4WȃVDngqԔ+z{彰=$5N a.! uJ`Uc,XE,6-9FVd&m$GEO͗⛁Al.]b@I۱-y%ٻ =VKlԶ[f x^YfXE=2w)\>bt&sBNm;̬7q:-lœ5kd~w5>:n;+q<5̖Oms;tlov73h]7p'?Zg!6Ek_KvvDb/6v_GeQZ_ZWۻvtjU!alRˊZVj}}ݝ7~=?h}=L'--n'HeC.z\]|uGI h჻ºZ_6*[jSپzz~A4wQ`V X277L;@rvǯ枲x,ܓUWnпSq`ybu%վBEog Wmf-O> hA;e%LN(G[V+upSn K1IDx,)B ]4F e@I>M iSD)QLO{g7=%8-6RorS2v\Cq~>Wo/ܶ/g5r\ ,Rhd YaBǙLimQ'mۗČe5HΗk(v->v;2{7'T@Jy'*yKB Xb 8y_@FF#8Ζ`@4Vba;w]FnsvJz؊votny>c븱r[G9ˣ9unI*KʹF^UKzy Oe[_JPcBp$ZP|+ْV"WM(x&92[F/֏Oc F:~jSoZc~vdr$|@NI_E-[nJ/S-3#wg0DwGf='b.vp"5Z&SB WQMy -IPK1iݲg~,~yaI}~vfQ5 s)RN,Ct22ꐄepEǓ&҃فE //bf= o߾D4g:z2VH:GhExM`Fb |Z֯?Xsu#hO']]ł;PV(Cm!'>"J%SҐ;tmɠӻsU^CC*q@UPX9C-Y{\P{g_wHP{w")cJ(3!L%%2TIf"0xź셋Z$dO˄?_!exy^EeC ^SɮWj\2Tlu;X$hMN>1 e̎B.) sHQDCocx%̗󁯠sLy@aއX8)̣ݪjp҅FɉxVB"C;'p/}3k'/r;# 1Y'Uy8SKU0;%&r+d2.yӈITgKībQR84a?Tǿh]CkC[4(klv[40ճ939LVe 2͓%QŰ[y+{<[m9,Z܀SIma\ xAhɢb< ƼOJ9ʣt>^*.P(%Я00>_֛8%YjBR|9~,kKYUu9G?iY߇ˮb-f=dXG71ӳgqrUd$gdYLi_iÃp@~d26؜ |IR,GR|1#2͚g9]pa#,۶fD[£fݼfh=cogM.G_.+Nq:H˜G(5Č$J%[o*q' 4SHúHlQ؜<7FZzRҗ85x#x:s{յV-vy3^10wbJ +f٤T&bzi)dZg^denNˠ|C㇒;?&Tg>{Ҩ )ŏ%4N\'p~^%-=屷IIsҋZ2$IV' iCHYQ[' wAƿKdCmk4i..&y5-^Ik+ 7 QShɲfx9A-t<[`߿-L pQqUfp-A^sBuBH gURW༫\Ⱦa&Zv Hևz,_AAƐMrRHPN$ȯd.:1.8Jg<_ʚoc3pcYЦYZ (-O EQ<%B!m> Ȓ\*fS9`$1.˺N%3l,:P)'OTE hCd\}\ĚOib7+k"eeP Fz lsQ3ZTg6,wYwԂG-*7ds0%ҍ5t-AU:Y jH, "j+O$d40e`S :';[=-|ɥZeoNF,~V ?-?^}kvL ͘m(,8)~2~.uG"*ztS)eD?|n/!2"U9⤞ qKTxK&ѩRrG~6KWh6'xu.]\[m5Kh]et{d,hgSiV 7ѼESYCihQd:c)/K#p3GiƦ'hHO0.o5GMvQ]}_ng>fOFT򿟏bfΑx2ͼN--^G`m$#å3lLImhU<}/=|37*Q76j۽r9[:+"wme?l~Y{,X$Xb6aW!EQ(JXLK"qԩ鞪4Kq4xXٿ(tQmql$MO 2zŏ?lëO~x+ՋxO?=pV³!h/n_/y[nZ9ŭ}ՐMp/ϲu>'ROtxI@]EVl'ɢb=d:%zT;.mb_ṿ@eo7Gĝb$zI'#GY8B:(Ц'SJS0d%X(ɤ1~pNy]?{{^ZD/l0YreU z/-⫎1<(^i[Ú k:{URm0ر%v]η4ѫ\ ea2t-!v%Ox.*@,t+u9>e/ãaRMC]acW׮٥4@{y9%CZK~v=h!Vt0! _-k$K`g#\V9qd#Yr|%A^2V;obOh:~{e(p7e69qA~Gȥ &g{ĹW_o㏳ a_ovwKzN@o+Zkq qlJq:z0J0=9k6ꚏfz J: DbFDdXXﴣA忏,ز_ZBA*E\l! PK rQ9d`"Uڧwgnq66ƆwsXɖrOOoFW_4Q`Bi>N }IH-=Teŏ. G]MŻs~0V]ʾvlZڱ*J|P?O35']n$ޣԜ9lsU6A*T+ ٷ|5Zjx)8-CR)FZRB)FW鄋|)92Ss!YEVR5S1xFiGRS'y6IFꔑIi" )獧GkRT^L)K* ڬt˶ f@{A[rhV*m9Z+\xgD-O{Y Wd8/ 7bsιfg=vZ ͑(lO6^rݖ+vsӾb|?wPV޾;JƤJ&g9e$]Iz 雘:Z3vq\cN8" iM:msQP8 &QHs q08O8V ]̅ʅ+Dmg2zH/p۷W?28 GGocU&!Hg3RQY}r,9 F)T4!K1p=*Z*FdMeڻѴ5mEl'}aoj*U$^ vƎdzb ke݁ Fׁ)A"ycb l)F$IDEBe;(VmlHibDPĈ*\HFmJ|b-]b4w:ͥ0P+#7Y<9J&# R@yäb4iwx])F6VdnI)mEnPF0͊|5T,k㙘#I1ŐUťHZE\IdLC65MPG[N}z9ȋL"/xFO(YwzaMSN{; !Ry#/>/ e‡;f{ [e ZJhcoXؾ v?29?|n- \@ҵgy& 'N;TP\O4"6B>FHv{FHG!^lB^(m$|^uQo5Fesjbr떠aD_H:y; E‡NӼϪ!0⬮=g,q?}XO۪`ԙ[ħu&+.!BTb6"\Ԧd,Nb\I9-Ud [g8 IId#i.9)YSHC6x͖mvi:ݴ?A[>\|ѝGG=ut J67c>p]r])vz9"MXP6{ʆCo% = ymX %Qhz#J_5sIܞjG<.GDW ѣڱUC;P*zt%_5X/]㡫WUCy骡@WOE0hFDW"+%Ac>l(A]=EbS0ƣ\5jhUCi쁮 ]iCAQWKBZ)JzteXxvb}' EKSrO^m7m .Lv'B?9_&gC3zsqu& \3J/޽ݎ`Dw<ZvN ں큦M[*GDW !BW -hC))ҕspUbDtǣ\BW {uPy]=^`GDW ]5<jh/14"rQN}䗏=v0x7?] 
3uEw+:ծCOzcFDW㡫׏wj(@WOƤ`FCWɏZ{uP]}bш4jpj{C=?]5꠮"]iDV~LtVUkX誡v (]=EĒDW:;dH=]A]=I2_<x6=~'0*emb3 89/gzv* 5,9kB+y};5ָA;|u-t׎]у^u'^Vj vz5}6e&GG=ߦyzd?n;jpttYKVEzrB(w; GtmES k:]ߴ!J2ĸ6^TJ$ wS+?oVWnԥ0tS2>Tve.J'jÆɢ5x>RF\< T,@v掹in0p@YͿUb7#wͷ顷 9sd+{"Bї URRZ8CDFX(*Cj\ ((WnQ%mY7eG%$Iwiy=ù?;-MhI_ѡn:9\F@m:&{.F lE\NBoQS-{)N6\;h!2HGKB I)PF@Eib2e%[U!&]cGnSg ຾Եhv͐hm)XtEGFIŜ0 I4#{[ILU:و]jݻ3HMT)GZ YV]PRX:=!0-s–AFc2c`ИU'7*&YkPtL'k! ִaE=˻"oML R,n@Zeۣ$X*X̰!+ #0o- Jku&_m`kfKJmw Avߎ. ?;9 \npօD2idȂ}GA#dO$6cys#j*%V62JZxN(cEUމj HzA J{@ZwCa+z7xS<l'P(+^8:k)1U$&! X .$ZB_2,'R\Bd/2oDԠ~1cmpЇ,ETP U 3h H7.A{U "=B^m..9R`$EBi&N ," & v*(:*kC Avy szD ˕3fq2F0oځҐ`eeBEhA20$E?\2"nQZ$F3C7a ?%90 ۔|sp`PggcDnKv)GKDƅ*)a;ekGXaJes|е?ve.ӔKJ=.x!>.<=>ץ]'fsj>;(junhr~ )yo<~ h+~| w=a}L0Q̥Z08Sb|JPpx\,{v:dWB}FI7tiK a :fDS&9!+-t"`4|IdbN#f^p}!4႑ctZ K`>w%k]mcPΘjyo} ]]>i%rjBՏY܄|(4U\M2Ӗғ%Ou쏟<>*8S$N^ AıNU9\5ӆq:GcZp0_=#y/O{Ԕ7FG{|tɸp?iI8kaD N,C xHFn p.: Wf.In!UKKD`{ـjR(CHz8$tՅ+zRw 0K5y@;ϓt.J.%e$Gn0j(?,^777wKkp~[8i3:n\M(>,9{;:5lQɃ'ʣޡvq׿z̸n.yRͷ%0}dyX=tWW_SOCxz]~˻wca?c;^Sp*g,+y{wn?7Mv_on7<_±= MwvW8^mk%wgH_޺Www3!mpՓ^n)3mSfBט^or/:z~ʎEEicIG[)b( @YP,e( @YP,e( @YP,e( @YP,e( @YP,e( @YPzP,mSnb٦(m߹o@, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@Zo() ( QD5I 5eZc`I KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$z@b֔~mkNj@$(ZhI#fIKYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%, dI KYȒ@$%֓zK3}yݵ,5/?,i_wn^}z$qMUQpIkV\..^-%zpI(m u~3E^iU!7:-Լ2[r<)ף+ Aș/FWġ G(uYWp㙛E+(}c->u}pE`f5k]WBItB]QUEpU7f-\Z(?gZB(GEwYXsוPbZ4=p^E_e"] p nU \_XFIU|.~涻_5.xu=Ǧ67>A:ۻ78 ?m [eOθ{j~Džte4<` g>^>}y{َ훟yGp"Á[jz(0ߚ}nJ?,8;!}$n;̟6?]~_=|}:'}7_cf?v1q[3P uz5\Zt%e1ə]'}me#] tW2sWWhLQ9@WtCk)*ҕעFWeJh}Jʦ*'Upvzt%^Mu%ZוPFoZB->"]\R+U\#ZF\y"] pjt\ZוPbZ0"] pjt%iѕujhi5~a8B8jwvɑR7[M;ߺUM/JJt] .E-Q&xGJST+k]WBt芟8||݋ +lmsaՋ*E:ccciyEEV|tyN1H\HˎhZhSl]BY ^QRR,ѕRҢ+Ժj* ֟JW] mvJ(Zj."] pֳkѢKl3bl5T~&klE;,Rm@hdmo)-vk{ӿ)G-%P |؅. ]qhz 읚G G BlqhE-dVE=\Zt%gʚMW+Uu'HW&Ejѕn]WBMW_\`k9{%sGុ\F{fpwUYbԡ.9Nt%5pZt%>+dZȕDt%JpsТ+-u] e*xJU+g$f5ՕVߺ@I.V#V+~rTܢFWB[oAV+\IӣvNzt%zeZx8_$y~?wl $ Nv P }gID#kӆii]]Wկ$]$>էW.GB \%q>JǮ@JOj~L2 =SATҍ$X t@03#ĕP`:I;*AJ?AV B*WIZf DW \i> 0W*IKP*IIN]{bށvXЧO WI+hnRv;\#\21.oo L \%i:\%)%9'WJF1=JʃaW mɎUkZkt@pEBB \%q?*{ iS] W\1F9$JÉ]%q=JHj?G+Θ‡JÉ]%qH:\%)8էWZ-:}[ 2ArNjJq٭DʅA;>vnSwtob٭9v!^ːn]+ʍ(‹Eg)XGGXL2zhL?ʄfVb,ԨSr_݌4OO^>\eOOq(ulZFի߇Ԕ+R[1/b*A*H ZޗwTg8y{JeRϵDxsX*W2ʣ&bo8=7-0[ Y־ms#e']N|/X'\6Oi8; enh"ӓ?OOOٝ(ӤՓˡ~I9 ) 4Kr ,)_g**(\)H׉J,|t?7KWcЌ)IᦗMr1_弩jBwln#؀_g{5ľ5)}< 노$l]/[~04˜t~?J .#P52/ #p-mQCH1,xR]9\fۥN[{W7 #CUpPoyn9`QmB9Qj0. -J))$Q''}ꗺ}-~l*p_.y0U?*knTA L4frUjFՃTƿjE>TG3;_X{+Ypd}x}vM^B{hƣ;.*|>I"Wfڇ,7wW;]0-:鮨ոj&R5ͧhۑ kNRR(겦A^ױ8+0 ~$-f4J`ϋTH0uo)4Q{5\Y !lRDa"{⁖!I>Aq92e4ܮt AQv:\5{ a]5j:e.c"Ozsr~r@_?P)nCkF91sG̣c$KD+`JՎpvd?kϏ†%=_Fvt-ijA>Z]mID[bxO%KĐ_ 8PG\S2^ 08x80dQ%P%k$hiLG eZ30{-#chnDL [abX{!CTI'SdV52lA-_L5azˢT'|gž8C/RMhlbs8̤Qj男j! b 0r~]&groDʵl󰡆ӊ-lc3]1@" R>IY}hAHk)b.&0ɳmR^O&(IzDA'gT|kcwr3[P\VQكZ!\{j)1[Hc":H DsvAm(GkQJH11Gm7A*3Fy*aak/ʖPQp)Q21zi.~8?tQGrwGl2{5E@* B1-n@h`^l @l#88f!3 hG9AQNd#ȁS1-ak"x NA@ s>8i"V"hLHYbNrV"V!!Ɓ3uS/ g@HKsI&̥ՙ-#bk<j XsR5-n:xGe+)((0ktDC4kqq1p3xؖvt~x>򺜅j|0x9k`V귅xJy[-޸^FQ-xbqS~R,*7=sE1N Voz +tóbګ0_,]UP#DV-ZE>e<2(йmqoe\ۏYT6ELy#=c*RsT[4? 
\#?lewN6 Ͳ;9韬|!)T5.T<YSM2GYQ--J*mZRdnE8+тYRmr0y p\ gŗ c\iM|W*#%eOaiad@ sim-[;kCnTh]2D iPlDoz'j +a7K`R\^(YsT̽v&$Z?.Lu|wա-"(k  ^rLIeQIa#ǞH0nA;FI[G"BGꥦ0GL #0рG!eLDxJkl}j'uֺ=C3ןk7̆(_79}ߏ>bP#1OkA,,8X"6Q#cE8@"’pi\"U#ƒE*L<&Dc`Bـ2DpCF,s$k{sskKo9F,(bɴt1,:`.JJx'xyhkKw<8np9"Cl\H˜F匠[BI(e{FݎA%ߣƠ0N j/Y6D[ tjCu&& $db8d AB:9LXK wZITRPacX1bvȝhXY\=v1+>+HC=Ǹr2HdQa:O=Ɩ j4")!n9`+|rҕ}Ğd}@g[ϸ&}=QH*$?xM0e)BP3mtm4"M;zYefZ?ȨQMEڱ*.-, 2P1֫tֿҁe^[m|Q0!W0jveZXgma`#ȷ5qhI!Clpðze !6#FnAg6k@AR>*#81`h4AYu(hIx:S o:ZP#oR#sup&]&A0N?H˨pfCp1Q(, ~*f܌=廐#9)xJBϷ & c_YPOY+pRH0E7ARP뙲 vt+g0%P0 *:mq\:$UX/5m{lΒq%7˶0>- Au61T)nr< bo>]{Z*!Ӫ&ndj^Vl~q<4=[Ѩj%o5)V!7V/_^4a.%Ka.8o떥zղH6._&}ʷlQTKb|}I!Qik0YEOЂ(^G׋.We/*R E%7ci hXXH*|SLk.ː1K=fTNRg6ׯ37:~`t߾Lߞ= ug}#g`PmA~?}wNjw(U4(*EYyoS.mʽk>ʙ[o/?K+jՌGhYMș_|3Xu,*-Vx۾ɛ4Q.qd3;&^7Vw_;HQ}$>qɧ[ꃉ!7"P¤sVc#D,8buQ0=H;ذ?p`5Fx"?]mo#9r+| |)rI.˧`cy%۳A[-ےݒlSVӳWMdbS.92KinR~ &٬|$yܣ +A~<өr3jss>P#RQ{$" X&'~#cF0 d* %G=dcpiCW4K :>ME,z.&WԮl_iG 'oKn }w}E)?V<wcɯj{?҅eT *S·u-mK{m7ii%3{.znn @;z9.Ile8eAVɢ@5©ﳺ\us9rH4O&9"q$W>mQo\0 G+(PJ`kW7Ky8^#u> ~-]w]ףrhi8K{tmz$ 4 nuuh{z-޶ٞm.%ٺ{^0{ l:sһgeTveyvo鸣##!So8_6ys]פs)RF@$/;qvwR 2IA ƃM k1m՘Ҙ'Ɔb>S"m13x&{: *r%NtE C^1j1y8qsd%@IG˹J) P"d1*g*c`9 -ڨ<"8 eps4h]´9R9#bPD8O] CM XM=UtJĘFDQ$o 4Agr@d^e+cͰ#L"8' ea9'ˁ 9oͼ4(*;Ŧ+$ M듻9`s 2w|g35(6gF܁sR 'w @s5<t6ŃMɃh@X)N%+5$Ry-E&Wx1yE=>l#:J RON=E m>Q'''@x8YWR$1ջz8|WGz 8% FYLA2Z#ӄIz!&Z9sYF2JZF8:9_g;@\ŦW(6]t_??1fP^v)!yCu[I``m-rVi)Au[úF#njr~VcRc&97.FtO0|Yr쓍ٯ y8 =X5*eSEb)- Td'unU /:_P˖v 4/6ϗgvi7v~Ѧ1pW;~jsÙR'Z1rii5m)b@2S7\:GPr7G0*(y0$yMZDɤԅ0j") 5&VU @:&ey,l)5dP8%;ύ R RdQY1Tgbp̀4}? ͜|8٭#u/Zlg藨t7P7=sqӔ|i<?*!ԑ)ńg9{'R1-1%LFFH8ލήNzzUϨyޝ gp^c5hZHHRJ4QT<p^ZDheD9ΥLJY0dȸA$lAzhy2[Czܑ>XĐ}C*gl*x99% A!iJBQtm0q1f%ҫ ˡV$VYquՐ+kRK4;Xcn=)b].BOYT# `e7IۓN*̧JƁ'I%I《 +ER3 Q D $ S6, *L+9.`ݰX#Ny>c:s/Tv,E-se[M]["\.ڽYubܿ٦%i5~m`XVh՛A*Y1#^=^҈x8q;6ˤޮbyL(3,#lʲ XrH./ {ݿ­Pvm)MH $ϛ/y1ņS[:'w\J}GU<79D+1[lUYr0.Ƞa1$o|:|fvvYٕф9 {+2(G~~W0hR&oil 9x)\ Ajܵ2Ls_dBz~18`|""h,V(K[DJ:OQ%t⬄ M§!R˳[|v˫7ER\΀+= -h1(gRĬȝ:[iv=]2imM^/^VYA+#vodŘٛ}CtvAC0(u|UIJ4N%XdSOAR# 9@JFy#8-\d{itV a:jTFqdTi%[Cv XZMrI,k0%^z>i}mp4_G>.Gtt tLc}tt|,ʭ l+7=8v2Cf8Qϒr󳠞9aC([&ý:ZG%.%]`Ff0jWbSBdО!h%"*^5qCp~=lI2Y wT$9 a>> c-VTVy*u4~V 2"dĕ7ڒmT,LΨvqEZ4j =ޡCZ*i2=5&h4hYk8;[= JJޖ(%Pe5v\΢4Ә+NQY-:yަ-+&  CKe9cQц#I|]$/< SN=0ĢP)ŬHȍVm]@Ò몝Eؠv#ȱReq@d򩭕)Ȯ dq\cZcPkN\cvsg*N Πx wc\^ C 'JJg/hmܓv/|ha+M~PXp* /ٷ J]Lt:?0_2iWeЋ$s΅h]i&Iy٧@*]g톸"έke>-<96>(EBEnkЮ3"{%߾?,s-6N=קIG~6}bӒby@-N.||? &ˏE5zI&7-[ 7ڍoE-Qg9kαE/8b 8)+8pBar̙@vc dtl|(!rd{bgcb` G&DJV 20(Ƴ`̹yU'TԡT.HR7|9Sΐ5 E; F&sjl7IC7+zH1n7ĵ̛voHREliZq">El6PŅb(ϻٛ/'g킌<IH3Ti{0Wxc0Iqd cJ$RZQ1z 7Aمr'NC:x?v;5#`/z64mW<U :Ci$:[i'9JUkgs|tQ;ۡ+Y9+649\誣u骣K ow ]uFυ::^NW~J?q;s_^ ~h+^(:0u+}F+?#̆:\KsօC2#]A2L8+D٨ȡUGHWoڻkfCWڮl.tѲ:tZ GztM]p0v6tr ]JUGIHWo;tZVþ%]}[iR~ "ϋI=/oN?K &Ƅ(` /!ۻu ^s,a|?a;Oc{Ql\'b2srhwnWonC>}G4S&q̷돧mϧeo1h}!~?x?l\bnYBojw6_Q3v 2C>.9-|/o^ֹwRG= +q:Wޯ'Vm`~c?O}npm͌]{%ź5Bn6k^0RU؉׊ʶo>-,@Ey"Gp|^-#yxBx^yU}uy}VG3 qzrdL~{o~~:|9ؓj4ϑȈoU[O%.&i[RDVg+m֑oy7_O7H1!>d}_?^C;_kc)pz^/wp9ZӢJl \}K٩)Γd訄XK0uŚz}I.k_IFꘉT,}m#.TL.d9Ul= Nbzg?/4F6®6UvMС8`qMl*ip-m1h9t{NkEu`(jfVtl)AuQZO!j,Z%Z9.Id-:R5ǪT 9"{, bɌalXhlm@Pʦ?_3gU}h9Ũ+^ᑈflZFC@SHF)vhmHsnEld ChtJ CQ4zr@#biͻCsEd7{]7B{Gz]F$$x\.c7Yyc& -[@!!X2(֧!J8jmx:oB=v֔cmNDTE)QjjւZ)w:r_~J>AKC`ku 9GYG7(}m 2"eqAl`5 j dDHȜ'"T# JPG"t.~Ҟɳ*.zчlTS*!ou\ Y+dnL.9(R6Q 0ijA 1 oTTtC`ڨP/ޅq*X/l).(<%݊ NWh sVAk!O=4ن:8ܤaD9rEb-+q2I}QD:7p,ji ,q%Y'eTߊT*L:4.M+TR\m/S ! 8$F*q(JƬE5,}ȡm2" ŕ]b-PP]-hEآy ,lC?KnRHQEDf0VYq266&T2z:L d>^Aq**ui𯜠2 $_XqVQLF˓V{VS J+hM0j3|9aq2M!,V<(PO $ ( "*Z6nYQY'ys3 5%@x`;i;ʇ q b`ήL. 
KivL.* aDuَSNQVn$d&p=·\`{L`;Vv%8ҕ꫚ *{NǗۻf.Loq>ߣVU< j.ѡ/X/P}SIND_meJނS=ᐴz8!e0lMwW]U]]*c\ҊT-_Xb'+'JoT)=1-i@`.D80/=l3Y!%qc=rAܫR'p$HH<$<(&F~Վ,|ܛmaS y,a4y" f<cpՉάuNOr)=aNa󶰩BN*5^rF.Njm@E ^>IN34̵H Ls9)ˤ͕|F7@ӯF7zfJQ q;7]_rӨH84G S" Csj|1Y,~U~U#]5WQH@$5k=.nְ|ztOcf RubjIz#(AZh;PS;8ExP͜n#P|5QN4iVxz.3ο(}+ћ~Ľv2z)^|Zih 3 >ɐ֕K}89b>B [^ hR+c{@ۇy?e`~yoa~豘ӏ(bK wOXT& CӨ6lIhͤˏ뗓梱ׯjYVNݢ C]5bLo]M>t1*~͢?xjѓ{8TYg렷 zERU17 !Qv'!XP)wg %_{7ᬦĭwJ:92Z ~䬋Kk:̽)!H_VePb/=T>TNT$ &0-UTN?}[;TϲϲϲϚzt@%DT xSh0eA*':2Z%j]o5DoN>Fu8ݤ@o'9fސ!+V be=n@pҾ~WZ +d3Sq4_-%;d-cxN(/iHQeZGMJ$Oyg$@;ׇB1ɇ:8wS& 2[ۋ@΀Š[qmC\C_JJJӒ6FPڕS⮏@KbhJHкԼN%1oLm}ׅϣ&VZ`O *H9-.SצrRDQ2r Lu)Dχkw;C3䌩kem!^lEΪP9GzϬ䑒gKW !j\~c|J $a>6`6.v^.#C:C3 4\Ј㝜V/̔pD,F2W>0^4 T:L`DG{C7 MR瘟e"5+g8P+Ky%BP۟o{`⍋2.2Bdƈ<$jG8&JN)"׊, =W F1"(R_R:KA$a#D"2MQ.0t*iHc _ De<b 3rYB&nV s*< U~rt$Q9}(wcqc<.}ӥKcL1at,3FaRr36{a7nýVE$]kʂF~ձK>,,,)IDJLí s5y ,ΓFxs:u:^;tت %5vC䐤/ۇ$S&b8iY0٘hp_U>$mDK@ #k Gɳ(DSNj•v{UI)%$PŅPr;' gDRq/) む3QS.(G + vf0I0x4%Ss)!Kny)`LvU MAP*v]'U~ˋ""סx~|s|{pIdHeje/wa,xi;zۋojR߱u^NT>U;}f<|r[brT߯^A,z=ػ?%mv߱oi`"rxo/ㅋet^{,d^~? f`{?*`@u>1.P.\D)VYx'f,F_;1_MȥƉ@ǷG, K24. X-Ihہ2)+nO1pH4Ȕ0*4{4w| baۇY#(yr}q=ή-ͥ&C7_~BH`E'sk.` *ϠD5p&O'ZbE/\4,\|Ml\(Z]Zx$ BRp&V  ,M4T/}K#o%axRZ@ZK|oF$į'}E'{2z=Y Y T]"n'I1jBS)fXƨE )us}M 19_ \p*,H=j[Ds3։W"j95Sg32.%jD#Awm84efQVbTq'NNe9qRutCXD")#7A(R͓SҭJkZ2f W&hmlinKx6so+f o?4¥-Eul"E^ɊpɵmɋfE5LuVmk!O^i]@J݇ @[`M^E4B^0PNԮ9r paqyG-peS%2EC#Cw|Ejdv5=#͹qMVf۵/_N%q&1pkߟ:y%"FXZhnc= \d-r~L w5jɛSfvz-VJ|^6t8c3DDxeth>SdWݏe%>l^7ysj^gY1ȁ-)5W밭f~koWZz4MYQf 2Nceȯ啒q 1rZV THA5K=] Gy?>x*el9ۼzBhA!QT&M!;}A/=U#BpZP-? ~'&|X͖ AJRu Ln?e/H/ח'~ "rNhmٝ7|ʦ3B4p)\-ˆ;xy<\ak1X7/iH;qx]ܜ$(,SqƣCeMRN-Ir-⛰2`]]'Ų87-ʲzzpwj 7.m$pV#AM :)4owmCyտ΋pAt)|4Unڨ +;9֌A7 < Cݙ|ܭR joY\Lȴn>K²-c>rc2Bbc) 򲲴eƫW2£c.X$O(Y"ؖ16Z83og{̠C ^b5\vz}x3F()7$0hҥH'phUs[Yv/Z3^ ^+y s&?s&r9yZQHsV7cV$i3f}{Y^ujn_ g&ilh흘OmM욜q/g_z6ާs\me}MfOz6{>ǝXvU# '*|r7&w[.Nomh_5dx$x㉽bJOi5/G_VM'v*~}+EOj8B 7JKi{ITM}w"1 []Mi&`A8n<6,eqvY hJM¶[Vu@"c|L;.P~=;ٝgטFgZMؽ16-zYamҨwikkڿK{aktM+0Ň\,8l0TR]hZ,y `P;h> 2ȅ"eZZ@'D-^$9ɜdF ɀp`RU1GŠ"EuLY.\Lρ@#N + .vhetNhdNh48^L:@%ʀEmY-aMU&ْrii+$n f})/VFF-R rh=Jɓݽidǚ#롑5 qnڥjfm6Dh*Sd̹]_9].@ʲ,0Fk4G<4#g+!aart\^ARvZahNT෷Լ{f$G//nJ훗^|%%\ e[^"ˏ%b^!)3+"`uޕg9gM86(r;M~*ٵ<[#Ul9~!؜We"Ngq<!k.,ߊ-]^2s<+p3ANND5ÐZܦRW>1*exǙLCĥ$vgbNp.\3a4߈? f>d;k16bk{LCYo \,pik1B8_W R@48"f:|aD }E#t֠.47Ce27%pv$$? 
Q,TBpFqZrP?ME`UbH@2+;:iF ]sɲ\dJ913힨=&!_#f,Ń;uYoU"fM E=`g'v5t.f6"`=뾷B(SQr)崕 fB:d"z)K_缼@5jVGSf.xܓ8[~hZcNwR4Hb]uRn`4Zvr~Q$ePJ"ruy>iG\pξ !IiGX'- ^ ;E/T)!وIcSuem׳:lMc]9Ghŵ5Y| E> ;a V~a3n]MXp?!D!\NA^=[ #WL΅?mn::~ 4OGPOa<%E" ٖ1 ~amɻ$z˻q'1R'yvsh4&MoQ,oR2}r_#_'Vhkg]8 3GNM Tv4 gk/yU m4f7m'=&бuV\R\ PeΉ"ԮT| S$Hbd|_dbn_fӀX%#KY>N %08n쵯JQ_A IQU2vkG_أM{ԊA+ۣ@{t_v?bɄZMb-L*87KJ5B9N8h=e"eU GhPZ??ۗ^l@1ϯ}Ug^`0'ix^KHv_˚~IVUh蓿W7}›=rQ&PV ۖ eЕ Q*#ReS|W{wBɍ4zNCHw=m%IX|vg}_wVKg^ +xblbo CGϵOUWUS]]jR0܁ɲ|/_~{7տ&tə>&9ucIYx)$So @hNSKp(?%=9\?vf5*ݯăaJn"봛;9g^8SŏΟsYo"\!lBn"*eЩt{w˨K,5{o5]:V 9IG5,, ͉b^̸DHEXL]3R'<HFJziAC+o5GyЗXm*Fp*EUf0Wb A|%9pj28 3~l*M_r+dFcTl!Cy6p+%q/mQ:Bo勧-v!U^iK} ə>\w:DY~Ehv;$,)=ߟ>u Q!Hw8#30G"l#u8:ӳRz,fĝdHJp 5$ņF1s{LqEAdcsK,8CO-i-6btMiU̝1Fy}[@]5%vB9"f>?jfgr8|05(2r*0&Hފ0oN^hSw?֥^=#a{4u}>ճt7xS?cSju>^5P;k/cÛ^go1rGoah|~ :]/6'v{`f} @d'=vSk|/r[ѺƝXŝ,ԵYR7v2Py!x(tғrB&`zq7x`(:(.cD0Rf)Rx́ÕL*][J4^ֱU6nYgPәڀGpeN E MpV.MxN>bV>1q5adғh=ٺen-9ί[t ZK&H_UB@3&b eG*RM4 T+aR6 N1HyPmp[B6 N\1e,K2?EMFWͻJFdXxHf!1TWSԛr-#*2h%Emd5vXP`28`^V!4B׉ϳwOR<9K,œx2--^M5Xk`-X:(s9Ӿ0h#c&!FXDÆ0))\3`#bJ Bc8‚0d/_|Fڭ3<^f2 L0'ѸB;=aJQ 22 CBD8h5b[-:H+(o%%$A| ͉hԤ xFϨb4"D驆J/Wr*CǘyŘ{ԩfrrru_!sqWP<]W&L:I`iL,ef)6ʹ]A(Z j㍉I`.:1 5%J 0 z"t9PN2G.k94Kun<6ʛ ]2@Vl2?xߙ_QetmYoGג1W ƽj.bo$$FJ kF76 4O=/yYYb{pf`ddʋ C#KE`qGzf3q&4qdDq@K& ѥQh`y @Jy^œ8KSfz&lk`i9ZpqEf]|'Me."60}0~l|QB @Cz r=WiJL6BOu2Zu0,I ^ 6Jq$zXE*)".0dd:6$ԣR+3͔EԪ"r!87i a r*|MߺVIyö/z ;ߖW–gʀ\ƶFc) w HS-(0[5LV^wRQARuhU""C899ʿ2ʛ_dOSD0*i~8&y`P!>+j=|zo}FuqAJ'Np)-~*FDQ՚67 ˹_՚dAq~jIEwZsޝjͿTk%%K$wFvY^BCZ8<N Hwq9r/8\pzȒ-ɉc,{43GRC{||X,9hB"( .ʯ .'mro W> :d%W e)}f:#T6TŢzZ&4fsVeC `W$Oh1~5m\83Ymd ({(D++6+ lJnwsґţc6tCn)?i{)iȆOaXo2QT۠ףͤ ֤]ζv@MŃWb[/E-O.06 ֧ X'ãAcvafTI}-=$< R6xn}:K3UڏHiv:}vděq,9߃ Ѽg.瘍T:;ך1d섥*ʷa[ϻIՋZJUܝORH{ЀUKϭ|{uVɑu٥]gzkҮ_O)גvx5 !ߜsCvꑫ-[&=]srzh1`LjrW[Yu@|VCjN8Edݠ ^*RﵛCrVnk+_h؅m 7H7XQGm@۷-ŷLj{biԣTGC*烞gx3H|z)ؒo<[!|k9ZG9_択ya *Pf.7{bJfO^HK l7_*Up!&5 1Fkl jCOl7Zf@ygy0JLnv\mvsݓu#֚=BMV.-bVE#7"r"9-(Pv!?hŊ[׭(פѾǞGf>|\^G}:)9~ǽ{4,$`qkt`94e+:)Aߜ~)E7qki2nG[kNP~Cs"7T>A 5 ;Fu9~S(Z*!x;gSŷJ*>*7`xnɿEA]/ǷWݦ{yE"}pqrP|۾Xt g9o#Yb6D A|ogh)ȧu9DkpV-AO"+6rY̋K8ƥDє6SuJ5HV3Gh?ca܁n^Hn` 6|θ{GjrjŞz폀ܷŒSY\R/l/7IKag}e߿L:'_,OW-~/,~y%oXj՚ im΅=):TD@֙+pBƈJszdҙKᥟyxE~N Jm4TCݫ:yD㑰^bLCw횞ڕ^c\e$L`ιֈ+P* 夊hcɪ&mbkařӌUHIķHfm{mamvmvmwb'6%ΐwo?}z9M5Z$ 3g&=^`;7(WVRC#~>qa^aT*8"[TceS$d;g$W-Y=J|I=Hf{绊{~999]sll)&ʄ{Ob( \8Hq*8W:z&qg(] V(Ϸ@H晸g?:</-,zԚ/uTᚶ!\]%i9E@!jmZ'/>= ݣ֟nvC{O_^Ť/#{PlS5Stt)|!]xZoj8[ q^8ZYG;n8n,)le+g֊+Vcrm#1f T%85"QtpO2a꿄_K%/z?p_;l@v])|2vc{Q;$ަ-jCca*]Iת# ΂Bx";ߟ3ELvPTr.vѲ\]TŸ}M)QW"= ZEX "Z'1D%R(25p斀 99nMWuQXLղH̼8V7EG^UwsQj -umCDNK <C F.fR$8=%n@:Q!esǩN6~Q&̛< ZٙW@IJfRԱɫ>X2(cXo Z(Z`UkF+⬆Lǀ(eJ#ViQe_՜[x~5K)Ьg慒od(TF#YWZ8pJ@+6M مg ֡o}V`|驪z L@l8AvlxYLLd~mbCLѸǵ{v`&1v#0q1x7>Tx3n?܊݉Kb;ݸE~r\"yOL&\*{t޷F\0f.=Xd|߃ay#  eXJ$NXn a*Ǔ37!0Zj+q@e*D F A%y'>QF)9n0,o-`s9fᵛ$¤A*dq}e%07a}"Y+CTT{\rO`"ʹjJd,N.P36"` PR$Ta-?L1F#E 8+S{H%Qjb)yU$\H69* L!{ST]*Q!n'Eg)ϾX`bbf8'!1(wD :YoLShB>Pt,aR"5wyǂa>cN#\".pw 1udJ|j,JbqsШ6._)i!$gM#KA `TI(MEmt6HĨ3(@MVE|muZ (АصA^vkd \ɃZ]AʌcӧzZ0Tb'5N(rgښIH/8$gku`%!׆?tK1-;z[M  CpKyXNջ]:xynq]ʟ߭-f!p ;_UmE4%X- b(V_n-CJ>k2p? Rqve^vR1eQR^׫ҭD楋)qg@d%UQO* @j 1NeF'Kbɨ$WL']~On!kBB?%z *^Y7׋@C7t%?{HrhU4I"ٗY}Ѯ,ydy =dKe[껥# ౭f,*~41n#&iBBͻZ][{`:a1h@w E4^Jy2@+ACn1%~2t{n/({4¸j攋nȨhglUa.d F2L;zdd}P-GJCcBl\tFf{zOf&CAH0xH0nz|4!uHbũR t9R2$` =0³D( RR``SJi:+M#n 4K~J福= l_YpCC H 4V*b ! 
:B JqQ;ߚ`cԝDzo2R #TRZiVC5vYѽ u:T<0OPOB4Ha%A0\F$\HQDH.VaZrafo>AA37 G|fsCѡ0Bn\ ^\)1tm,K*sּ^[<(uN6K]X;:N$͊BK¸@'Tv(Y#nSjVK'1@w"; ۢA'Ym8M${> ~gۮ?u5SxhC|?]~ȡl]s2 Y7"# .Ɵkn-0tʱh{rmC+(┲H #iϚEc%rc Rr%엩$Me[saȷd*`e.b5T_G' 8\PPZtVW'[0Uh4Pizk)T^*:W) J%laq_ ]VJΈP5JTpWfcޢA 8F/`ziun⻯~Jї{xܩ&ƹ)(.e)K}Ft:2eSoţaݴ7'_H_0) 5߸Gs{Oy,ş o(,7y >|qyAD|8],[6dӅ_Ӹ,GHH ZƦ'"(s RrFIab7E[N\l}le&Gӌ%-|zWm=o}X~៳{Zß}x<_>u^& 2b^zE>b13%|AOVr@26 :pj3]@3)ËV>Fjj)yDZ κ&i e vy7=64 .ޜtɝmAݷu.S]l+yC-X|/7R0祮ןl \ `R@|}t^-u7{]jpwA!(IbI{1Oĥ*27P 5JQ$]uSP^WPy>9q1c`G% bv,Q[ RPYw-DO|?knλLHay,٬ˆS?|H7>۝ǟ(IT|tV8vg84&?SXUzO"V$ӎk ˻au;Qj3 g}! $q =B2󺼫&Wy{lae"+چiH0\ S7g"6N,Ƣt"$]`pUЮl%[3>|ǎHu2ёBV@4NC IG5=*wa)p'=iRQE$sV;ywU{՝Ơ(8g0_>>J)y gi+fgxOYQ]~+F)t߆?҇h~ycvơgNH&l?ydj!15:K_ڐhm6)wԗU펿i|:_e׬u_U"5RM`,gὥh>O<-6fTZ7$w,,6!BGkZٛ83xos-&*<^rLB/`sWV롏H!Zkv?&A= 6«n3F>"Ð'Ϥ/w3SLP>1 @ v_A)'էIxczNj_}|v.Jq<"lHW9/gMfaa/2{$to; ^cJfcX.q,MJDj T*Ȥod)NWq6:3LZ ISE%epgWJx}k mىz0";{Eׂmhk۾PϾ+F.C)qVHl\_-mo î\7}T]_`?xH/y'[@ uw+֜jtn[?ovl&cn^Ԯg+oh*x-__~cE SRsZn19dC+jWQO=zerwԢXZ&4&<7l螫'z|w0+/Gt5o 0kj-lo&/E+a~C+82GcgVyc(hE"L(!,z_eֹ໭sW ?ƠU"J0 [2obK&Gx^NUi>+9QrGQSI 'D2ClG?s;t8ދQ]ϵ*?b:(šhD4/ѪCƱ$7CvHlU١11I_wZA:~p~OȑVΑgϜy@K)K!F?l~}s5X,>'$Q]~\bjn=.wmM_!=/\P 4h\q\JQoII%QR~H%~3Ռ~3Ooufч7K/%Rr%@3R$_C!4G~W~*gUeV~}Pɀ, a'җpFA$|X)eZ~.TJ~.lqXċ}9^L( δ"QƩ@fkYYMY2 [hEB^y7te1O0$pMqNۉ"b#G.ݡsqXR4#jxWcIthQVf@uR*COC|@ W}.60v2XyGY RWf {pB۳FBkdGs)/6o*Pc%a%Il}df{Ay ?m5Vñdr&+Qo+0:^vf+!u4ywo/8 ʚ2z;KURgN s_ـ=Sʝ)CK^KE ̙1B`iÛ;|NI;b<||:Ms\ytgq(veɒ|J؜1[fM:F1D%YbMqR (,xU(ӗJT !̲MJw 9 rƦUHN1,hf:j 7$ S9LJXrn!B4*l-T[5]płSI18CV1wfa 6b$fվ^; `Фڣ8>ɽ|F VmTSv^ )M)83' &ݾ Wv[ 19@BҐd3F$?ȧу cPqP=%(@=k*s4%;R% kl|&Ҵ@?¬ WVrښ>M̵=1L|\nKqϙD چvu:3 Fr܆YG@.PO:_k($8{i+ {VbdCl,9M9$y UHf^ D,)%IC Rؓ8F"q**A |A@4XZ@jB_I_AWDP`*9̘LVrZC,iK%MjDW]OtE?+@Ik4|ARϑւ1@Lu; jUv-RK/ j;Sa^εq況sdmTbX|%& V#dTꜛߙPpb^Yχɑ*eu!qe Qz+BNw!{TWB^$@ Q2F@@g[dS%rj!PDDž}\@xk֡}j4 ̒dvItf(6_IZ?~"iDYIzDEFaA} 2G)GBs_vR~ŷ[/7RD$wM'F8Qe1q%jQ}x#`ͥa$BQ@<Padw-2I)fǑekVLحVk:`UsvotQ@t ]K()'{,@.!gM'ɛ}Xh#8s O2] ň\l̐XVH9si:5?D%gP137e51Q[]l&%2Z&yU0)s}s/ }] pIGT^@6zQt׵TW_j1`Xcٽ?KBegDpA/'t<`djI4q=H #lnlÈ;a2a8qĄ; B.I}Q=+vYu!ծ*Owm n50!k{ TT0bɞ5Z X+FNؼ%vbXgECVؿFx#Hmx-F gXԾpa'7-6 +&3 g]VpA4L;87rUB+`t BQHwg1^&sܴR%9tU֝AYRwPq؈vۈKZ;ISs8MwW]!4˴U_EKHw~.U=]O.V^l HKh.T4Àe0)@EI=8"'W/GJOD✁a3\89n╫.k>AԪx.$Ȍ~;my1$^"y\`YCE١I O: 3H?$;/@BA$U#oc T @C?ֳ.Tj_KIW~ngu{V>DT2eҗJ X(9`IH/8n+e*%ejpXն9ҦoȲ"A(3e c6QlP++e%P3fGסÞD :4~8|SIY-MZ,BHsK `)--FC^`/y<h,ќ1lmoeH KXs% b '<Xqgg@eV,* n ( -{dӷJ8عvKY]d w%+wo8'3_FÎ+YLJ jAQ8>GR4l 'A@=P6RH)2w=Ā êCSs?S ZvޛB\!qE/jRFN{yےblq6nۃ]S:;;tw!~R٨^gmfȓM ƕ3۝=Hk=̈́ Ԇ-1C>% +l{d٠Լ t|FP~g ;|vÔκYsC(>'S&0'@T]ÈO*># !1arC0 y2%3B*["q^]vcw[ ʍLʦ'G)Yn]<_PːˠpXKAl~Y!b&.MޖtnNG=SэS)7\BT|j B7Jj )e-(2z3v:$'XB 9rR Z{yPURqG6FYqcag8QЋ g<ƎWw~kftJ-7riC_5<4P@o5rX͏eM_yC^{ˤK| 'd2o$TO~# QKzIMh#ER }+E0WБ_ͦj1}产&}+L{ u{5~Y@1:K?I{>_R5 ph8cx>z G#eךc(G$P@'p%9T(&DQ" pfJ*F$8|UE f.#ώA\58\̇Yb8i-14M_>%Z?NFDžƷ=OԴw5Mlo -ŧO{Wj8VDm_uOݎfaWWx`_iv]OI?Cw_~[zGe" a{W˯̣r36fS?&5>ŝ(@Ar]/j=~fv_G4]_|$_/ގͧgz_^? fzdD0~|7 뿽G5=>"㿏~(o?d%q`s0g_ A]@V<&Zxc\y}>KyKr?exџ`8}& 3y/[YϿLjoep5ŧLߦTnMpe{[%)[)fQ{rׯP?t]d~vQdwwb:eٞI]jlgpN+G.Buo~M=.Yi1XF.7:)-grOսDU{_8zXp! 
t!8/8WмPp _ -xto 9d-,\X*cʇ}U4!3>ik8pRwmVs@2֤Le%nqF;3⛺H .8~HY5!ť*q\O`w6>_ DMf>a|om|~3Q N|oI۵|Z(= 4Ĺƚ![LN 90h}%:uz%ABb`v WB>Uؽp>VOeBFl m)#2@8}q4jJ;J`)`Yb o=@Sl>+le,Ϗbh\\9)#'[}x13(, !|$&tX ZRr jnh?Wg Hd)*$cN7zꭾ S?GPV"|0ڸ`t`CނB+Y.Wppc) |p_sׅUt6H7H9/dj,'XjƋ stT.~nE Wɞ|xV![WfT^]eu[ wׯsuUxwMzV>| yxHijyZ|.3P̨4wr=B ,xT2fl$sm|IN EA6@W-X LcPP孢d `'!1T@)V߿ɅEa\$522Zɛ0; 1O/[Bw RKa5k=)ἿTpV7>\NI > t-=4ҨJlQD҈^)7B-q`F1>SD d43Ś[w:oAӒHqltE#BL~P;0]ZJHJI m3zk)%D5Uɾּ-C׏"$A,8| !\t : JVPFF JI5(K\8wvG xYTGJ`6U1圑(s^k`acJ" xU1 -MpmFAئ/w}-62s[ϾRVp$6iY* Vsڨ7s(qa5䷌a<;߽ޯ6%gYP ;`eh#=ӳI5%t=h~f5yW`P }y"/OIj,Ίw2 3ӛDM0qI~rAyDԫ`6p<`TTB $ q"K8I3 :]&"bHHxneD"֊ 0ChH'H.xP%0H4\Ð4c1 T0^XRtO Q^$|~ʍ6Z(5tXͨz tȳNV dG;z6HM] >^\X%Ω֠( QHfAZBS¬`=y{G #.1t‚ɋcHuIF U3ZMV+ }&I41&"xe"XKBK\jE16RpR~.=]AS1g3"J,(# N=ң06^a`L`\;gX.utw}9 qWyP6D% or.i]hy{CqY_̩ò0SgǧhӳAfiOZIxt YLh,.u0m;;']vՉ G?1I佚+Of"t'}wAYa7'R`bPzR.[{u {SxF,Nvf.([4F"B(ZA&()dxAHKuvoc]%S.u s`vK6/ۮཡP4y|oS5{*"$~8J€.i XǑI_RE<ì`6f d婪QLՑR%hKF-1#AG:$X7"?GO@^k2*fA2:eEE[0Xt0= #.pJ{$Qe ګ|Ԩc:|r=)jyq+Cuv'rZRx4 B`yb PGu vOJ1H)KS>vE.s.{N̒ta9CDcS""BX=] MX59ͤmEBB,WH1S͘'Y`PkHy'mйl^m֣* %C*5Ar12,2vӴ;DXrfz}˷%hRsρ <7}=A&;;\HpJ^؜,grzUR"Ar|م7\~yAkQDe"u႙QZ^Uq 2i{$nM40qhIM|f  (8ATHJuGȨf 9Z+7ҡ\'x;"G! 3+mwS;('Gɽ\G(妉GnCKrn̐;r 2=$b.=qľnעF'kIL† $Sp5y?B!:M!VL,uY!qS!&.Iż`xj%&;$n{K]ZEGJ]0 B̥|@m羓L*#+{#^hy٫$@U~oMiF{Kf#OK^JG_C&J2 zxH 4J&:I#EQfB5dBIpT ph-\{źK9f'jby}.c(&㦷=۹FM͎2ޱC'mx4062P~}*[WMPM &6Q~e~&k"/Y_WxʶVW"%E PQ iK,?&53_VjLm[^>3QRE*WQddk-\Scװk5*yFPj1GTJ?kdw=gOvzޝ/3VEȌϟ~/{4/b͞g=շε37ϓ !M'+z;. Lć> >+D\+&?up^W݈bEx02 `$P\<F ߾j:~އ -ؙ<[4~PinDB7:#=e򚖒$$!B]hwTC59 9O;b矪|wIо64"TA]IP[XI޵0W&ୋ5SVF.%k$` 7aU΁fu:mCFBDuɅJ**g4'(IU d!y7*I熰^;@JjG~@Dmk˭Ad.& CsKfT)w˒/jwg| r=8i "wȱo(LJy_OC>.=T*!ED# /9?|^d\fPhG0bQZX܋lLc Z@瀜3[`e0<}`!8Ѽ]yZ١ 9:lWft$ G !8+6!e2N(j Qjoo}uUs^W2rl-eL .9E}\0YbܸP<7<,8D'F܄^D1&H!@gjrឩlz;X vNj Qvt:kzQʳVd>iQݞ 'H$ReOmɌ4VJ1~,GJRelxvRgPd_{_>W^M`55+Wk~Y1};%UяPbv -llgg:״ =6RPH,,8j0Rmttq[8& |Z Z'1ָ>' !qPs ` B7h|xH﫟om[Fc*1x˛O!RF<$iS"$YxIҾr'q ?4u]ne꠪$gUOCV}oֺe/Y3 s-FKf,mzuPe0&?}hvո6uPj#HG<'ϬxowVTrC.f>Ȋy[ O7S+Pg6#fLOWqg:VH6.G5إPq$㫦Roo~Dxw9$k/JY H|Mыn FMh/Іk_^܆ -k&nr-ERe«vD(Y,nBDj\}{I9t1f,KzA 9?6㶯 g52aymAU&Ңd"tjBD E}]wE=9)?hh˓+uOݖRz y9&LW )MA[kHs?cE{(mvh?ẗ́z6J)suy$m`LJZ*571 m&H"Gl9<5=\fqg   uxRҧXCn鏭͚gUL3FO?edr> v7l9[١3 [ ܰG.b9WW'\9zK_F‰ f{p܃ ߢWDƼIRDI=$-{0۲h|dг5U!jFixU  >`PyBk=H76dO{䄧=QFko}|k&Go ;Q'E SUk9EOdɌ*we99Pif+rV-+\ǹ"E.+&c"8j1tYg =oxo}lvz(3z1Opy=ǮQrcOOǥ1cRfbft2Ð8إoV=I_S%|O[Nh)ᷛH&KVwOwVm40x?V$7E knoK~zݺ|>6M2C\]w~5/ݻV]1F}DnRDmnxaF'&6jI?Uk NU|Եp8^BLo&ru(SY%(Do 1EY.0@Ug36h@!`w)qG6L3ꐢp`EEb ;=3Lz=!kO5-Y[ sN"j3.S5n.ͬ_#^ N0G 8}C eckTAMvƓۨCJG{ڃjª n~Qp$wJ2~1S,Lm`Gȧ3słL&O€YBq@R,RJ ,LlZQؿ^\Fa$~C ]$S 4 gj/ׇLzȤQLm;N 6/g Eq-Tkj9i7U>XmK߁"J8qpKQ,$Ŋ(۶yTI\Xi. >VS,c HDo碤) SˉFl3 @ڐ\q> SI]a[%}'.&i:~\GGm%kϒcu>s6)/9ﮞBv.?7YHnU=nLŏW-d?䫜.Ry蓄<绶Z5L|X55=yO<4i0oOՐߨqGYkMo?uIM։˻UF>7],$HZs~O.Dz==+dkο-_Q[ a$T|S JV5+N5r<셭Tj:ZrRmN޺1}Rj.GP&s9r0+yԠHMOj~r7gG?xEzoI=W׏ηE5}׏<9˨6[!$򼾔P^ lovA~=fYg/q~KL5fщEe>" #9ņRyikP:C5$|e@}NQAh g>B`)>$ӶϽ^z1ScVkN#J mb[2g 5]ڷZX)Ƣ()%rQIU~\\,paYN":r}l=OX:])f2S%i!I1{@P Ziެ.f0M.3"hPF8iXm4e''TXIwy_D!qt;HMiZ6`p2(B$=c(g x3| - 6l*l¯d5 ЮZ[8ILێt&K N;V{>fIC> 6NwrS1Q3lmRG %d#=y۲2h-\tĈWIgWF;D]=DR'.B%rteĿ"`g"5Z?fEP$՟d󓒽J"*ًd/JvzN- PoRIx iIZ/p|>͐?'/.uKUM:|]"qLgt6`zK>^M/W; QJ'onDi{-.y?Dِ(JWòbMc*G55Z;>ڪ !ωD93B{cf!^F9#HBkWh.e`$Et]Dƨҳl\?ˇu3F1% _&Zv‚37vp}рHVzHD/:UpN:}\H."LKLчʌw2:Yl\R>s(LX-yT7>_DvLwRxWdR*%zi7gE7!;l^Eyu},D+>zcxxFʍ4i< P"c<a-AE/^nYSw2NyK]:a,|ܰ-IŻV;ݜn4X WMȓjKPj PAp\4_djI5yb\ldN ~ޞ{҉!) 
T3R}nn>̌ G?_ @: X2]uV2yA-74㉅_ay؇E&*'0ҙHBx&爥 YLKre6c+ijq5+HcH$q";B譙1I/1ߍ3dhL4'"f}'"`h`iN9QzwES9h qᴱIuVMu1uqwqd)¾þoNrRQܔ,n'E TِS'~+cB*ͥ +˃Y04VZBF[6%+mPxR112GڐQT Rvl:j>?_FH?Q}+>^0Pqu( ТI xS>a<\r4"31˔',c9Ļl&SO>-ˆMV#{ao9XPfQw{1$Bk-zwZ2px'}Dž9:q3j>o :KtשNJ`Qa9̠V,bSeEbƒGra5-Ud36+`5 P|d̈́\F؜dN&ʜl}GALۯ^S<-W1uE5r烘d bCq;SCH㫩;b|C *qQCHV%7/)\f>ΤJ(f3?~2Ҩ Xg/t~RL0?nsFH!q/Y²deI6${טV25@gxf>ΦREb&hǸDw7hYeϗ/j0B|&wH.yZ/Q7ߏ~zw?}2~?NWzn4wOU~=-fr qp?)fv`v}{dMG0yߟ=P {5/"MDʀ$d.&gk"5rb>X =ZjaM0F[M"Ňt5XJsUr&A-ewc:A*p6Qp[_Eu t۫\*a+fk;$v`Ӄ)=ȧ|ݧ5G lmXxS *3Po&nw]Aȫ?,.#}IXWqȐ zIr=9hVZb #|>qIET~G_p>;oK!֋Vs%5?!XK u0W=ܝkfT޹4k|zk|BlϷ3V%Vε0GWVXZS&QfВBa NVcDQg=L'"ϗTUnS}Pk܎.gdpu_gbwqIc#ig$P6$`,/!,/"<)Kx!>/ Sᦎ {c* D=Z4@ҦYJ.Rpx;Ɂ²ܴ}P4CV&),,7 wd^8' BQM3zx`ANQ)དྷLMܺv!`hhˢ&Ás(Y1vDaGZc5P*kWN+{4wbX}ktB*(ݿCOmA:/f,]Mk _#_D?FѾ㯟tO&WGQKPwIckȃg%C +s xal‡1AKɧU#5 pIug7*GCf6j!uhWHD1s%XΝ }lWk[E_ܻ0,OicfVAjͤF^U0LQ*H/Ww->_c61ճTWN-䉔}U?y8Ja VHMh,JqR#_zɬb'QQZ? mdQY=%]hVe &Ү/2M$5L JAK9'jPBY]P8ɰ{gf2ZJU*9@% Ҝj*&CV1O/P9[ pӧ z>}.p1/e_xvM&'Jǒ15S{Ӵ'TJk9N<ċ!^D"zeD 'u✁*2D @(s >e#9N(Iwe),nXjϿw|P@ jvM*!Tb q PP7o*CΗ*HNak _JڽםԸJ  5/I[dBI稡\ +ݧù AdgP{7u&%{f:sW L=p7 @NyhUdQ2Қ71rQzx8oՖ-$Y;WM{0 ܔvnp-yޙ,_Q6$\jy/a+/%|]ZQ!脧2ḮWnB%]1!C.}ZMR1!zﳓ:mL%^I{7jPYi'E_IXY=ƒv޼kƒjAN!QplN џe[o&c^Wgz^F2G{7?G)Aۿ۫7ٙh:8}Ø`Cmܮ1Ƴk09I;:sUsW+jLg4xvO.&ccLgvn4]ɕ y҅nL'+b?>H'M$[.f)mOMJrLjJZ:F*#ʜ6.IwA1wm==ǔn.nEq:pSM,ɥFTIvcYIYNUH.}L@E Y< N7 )c \|j\Zd/c 27gMU.D;O+;ie+?akm3f랭'Sz{ Gvhܗ[M&ӢS11k&!Goa 8|)[)'wYRG~iu>i/'$4du&;qCZ+DJ]֐t"D̓w >&rB%|s wjUY3zMhO>A; T G3/Y8aW+|>[2{ALviЌm޼ k̪F}aQ!1] ,*}]kjҨ"A"ɮ{f"AkUDRG*9Hd~0MY"MdQlMM(ZApxrtSBض [u0&F^$ym;$ p(Sƌ(]_|:&{K$A?a &b@9!(P$l,, 6|c4 ŢӤg֮/C -cHxśG-JR6=3JV6"#.∈XC2$5wɆ,&P 5fB7z;rUgQE7J?TMfvP0kT]^rҥ2UkX 農IJ\ՈDЅM^3%u;Zh p,Q?,yd,Vp5d$Dת|ki@^LOV[ȜL fI*%Wч +I#,0S7~/(P>SEkE-V<"`cfe -!XxQFIn,F #ǡZ=/X9}XF8[*& eS@I #ធ0ҨrZQK>x%CW6/D걭,խ}|Jbе"Tc"(Dc87o)iSZӶ^kq@q`JvCX[Rt}: B[{ޮZ6_g^\8*.6ƿٔ["0MX_(Pbu>͕L6Ƃ>+ฦRB 4c: >9d]heHCpMk/d;(G)_3Qfv>1EZ6){4_6 D$`=G4#OUv6+ol^/,:5F&{2-wX ÙF(ё&jɩ MDuG`$08 +xT5 uOiF(5Bc_z_ _hr%?ԸD>7|! cw)W٧Y:} =~ c՝wޙ(sT~4,%#DPT{%OAZYe'zgjX >]D<ʭ۹qBrRԤM'^1ïѪ}?U bj=4L_^K{T>>}(BpY3s=uMJpT68C;53Kk[-*F(uyTHq^f G!3:DzB"'gۦ l5r*5n͆Ccb]yX8~ֳ"&[}۵#/؊Ű^{CONn O9&s{ڳM)*ǔM0bbHԢP)tKXDš2A $J"EF(E((X VYh!.Ȇ&$\N|\U!\RiGQ)ĥ /<_Scw帠 w ] Z.kZHKBVz(M+P8&פV^|Aj"w0F8%,qLhe@"B$bD+Ceb(8BD&pnh*45,Nw DQLv'iQ|pkWe_;DŀUD[#)21,& D&1Ŗ/i[pP8&!I0hIΛ3MjHr(ՀDÛ NU[sEQ$?vy;B֘SԒLoJѱf0BaC#Mj"CÄ( X K$f TХ#P}k[Ơ:_Kj9s;mt*D)S:3O?n,4oOƝ?uv3O?@0Yʷ F@wGӗx;kq w1OĿ/^xׯ^_>}yJLF76Hj흦u,~{/?x/~{:H;_ȺXdEwwEx^N7w12.]]wښ5NJ>gX] yO۽֎O_݄ih04W8=zil i4W2xV(s;'3}S2 &3_{b?ǃEܳg*~ gP'gQ,Io< wfmX}Rc f\~Py>rt=//঳;R$;3E^t}zk;/ @fl2vW3͗^7)eڱu pTu';V盥wЄ^wmMi shu珟К-땍3 _t7.qֺd矌0̳ ϔ!SܭOlzA!Oq4N+;ߺxq\$Z B>rqعF61`pؘptt=-}5^v_00*>`ܯW.5#.g::sOs,EkﴆWK[𙍕Gkn6'Swgp̤Ӫpܨl …)L;Q䜹o8(sp[S9x0zsf?Li9؝_BM OyW'ZX B @xO|`JX`Xg6qw|k.L_X+ڻ>2Rb{ 8hȹh 򝓝z}_ӃDZ5 U}N7 pCU攆BN>qw횅⃄V4(eQU7'qw ᭺r͇0R5#d1ļDH/nsH«DOߗŢ| @?eZ5H>\֏HmT%)tb߶~6 fjݟx jBur h{e& OF~msѶ߹hѶ@- 0F.MxDH%Z$I$V1IFD6A(Fet$hړ/2I~zfA܌\;$fk>ƕ?(3)v=ּ~-áa"Bbӄ2 r+$&RnHb&[Tj8eRIVPaM*ܺ SEI(F6^e{psKuw2nIH8B\2P z Q4A$QTPlT 2UsR-JGcaxc``o2 Ci ԃc@=3Xp9FT0#5%w'WHݢ^OT 4ᒃ!8(V $$ёXq e ^e5RLY_v`,aן/SF~xv{hE٦ 5ݛ7o;Q%$ V7s'w/-y]R2[MS3Bɯ^\L_{T ŘFX '^̠ 3gkp=s 䭤Zrg Lx헙|yT.dvɿ0 7hqIgƓ㪍ĂKSILOZ˛~r [/emJXq~͢/][)"*؜Eؖmu"hBǚ65M%a0V:@ rqk!II-`4:9Ph )k+H زp1OaE !^D8-N2Eʇi+M*.aR0PC/!hn5tx'ĉH6G)$7"*ewaEO /4NLQ06IcEx[IB% Ly]Yo#G+D v32R< l`5f0_(Ybӽ*AhúeDFd[16n E2ߊ\/-J\dya\,h(V99 8ՓFr vt[k%Td"{NdT2t ԥE$;ڢ^[r9iQd-2 j2 Ɗjz kL0QUda'&Њ9T;e%1YBG;T*dr.2f0yOiL3f;g9Ȓ Хgp h 5`.Sn0r9nۭF2qzv&W h;N VLi9&6R._r-OK {},笿69tf_jXc_})ξg_uq7ڿ̻F}3@ :[fg1󎊘{7HCfp "ռ\ta%+6tl5q0ٛQu8=iX=? f(u\USüf P? 
ZSw2Y׊҃Or\/dIk=N(*f@w<Y GW+h Ja 'NVk~Rj2SQp~t_ӃKj2(X_/)rlkGp5!>ƇS2&G$Cx\\^~j4=99<`gKlܶ` Իuѻ)v\1 ]O/(\ߍ>qܖڹ>O9mvPܦ ܊v{ ^nb&h^SJV^Ӻ,gEɧ1 I|R}B;e5x1[{woOEv_i;^+vW=|02U'Fܘ4d3>`\\HUr 1m9 #8ZhXNg.ťf c/ݍn.g}Q.];B;߽vjm~]aV m aVD,o Gd#kf[;QT QI, 9$j N,qVcUK飳"Df\pGրޓRhpt 󮤝K/UH$yI&Q(@TLi䫒oUÌGLm^ p)lmb[WM# GZ6*f]R$^+XL׬?3r֜7_~&&7J$/|1}?լ8#%[R+]|Lxsi]M>Mczp.W՟nwΫ}/My \>A#pu1!=y,+~:a٣>E+H4ؖ G2- K=_P6U[a-66!_e*bR6c4[1ZZOiVb[Ogd,ݙK63|ЂrY5,]GtU)}OH;#7[?Q!{.{.ShNbd>vW#jJSmSze*}/y LJP>U}(!(T -弰yZv/N(Ji!fy, Y;FgGBxq2)7i f ,Iypc$_z@hmJJrs\ ̏[(kzBdi&'jWL,`2rVg=d0mຒ'h3 m-K( Չ'c]pP\ः6q_1U?k-xk:d]7(Q)O ONX(c `io.7yÚ7]Vc&bl?eJ$[55s0 bySL36ә/?e|Dt/,{Yγg;ma9Jrc?Ȑf 9RXu67cNW&jvчYYbl4?Mgc2謇Uh=Ϋ0w°V]H'IV$!VBjFBeX҇sVSc.%6B]° &A~^cۊ Zqqn9O-1fG&GcfAi%I7Erg@$,73TBKeLzV3Ĥ@1 }yl\R>Č($'|h5cKz{gьY kC,,qCW⥘UĥčU4Cdč مodm6m%ޠWf+/%CKHLׇ|_x}͋\m(G+xwH5R6cp:;Mh\"T Th eX36nV& hT4 lZߝa\q 1[C h9)D$);a! IB"tn}`|} 0 0_H5+Gi?^y5xWyu5V*%<Rx ꋥCIS "2:FTP@>jH~aYwJcMZ#-p^ u Uw@XEiୌ19aBbr7ei,:4:YrXpMTnݨn7^&e@GP0vIi )NƲsJ"=X:ۍF,]nlՁoQ-mhV+HdjATLTR{8^VxK |T^n4PK =f OpZ|K"}} Rj%@YYcB\66I9wKi7&'5V[~n̕U?)5zqRu, 3rۮpAX" Dŗ}P 4p3k17?8]2w-&fUEKRVQRu]bl"H-p b#C.bɘ r0 Q2/^|sc:,#O' )^ףZ-GbLlZ+GR 8%l/N[W&.``l/^'x:Ol,רz= % g=7O]Y^Mo~/Ԓo%tI|5)cҕ\ەO 9;RKv:WIП^8ֳk59{EdLR5j6Hz{4vjѠ7Wv;A1dGkrPBL Ar#RB#7خ*u޿2k0CbQSw~؋,W Pֻrec.1[{|kS Cj!Uu[/U_#pFO~8i {ut1R1*d]{Ư=^vjAݳ#Hd|Jb{pغ㬕E[-q ;Xx"+ 4qt::nWmceEp>`' g Q@ dYs Nm/h:#lޥSPhI\բ%8RNvf$ӻ5k5cB#!ڷrBC]\_ofL h9uawS7!^|̘es^kN7= 3 ڢ90D9Ԩc#48 Mi9c˯BWp{@ֿ1Kc^+UaJKӏKXnB5T BSɕ[Љ^|NmC`W_)ƙD"Zj'-h㲻I4_ cZ*޳# նBu?'7''FGz@ 4voqhԢ.-=H4c ˗V"8J:ݧaۭMDV'4 0ә̂t&? Y|} gUw$Oc{3({kirrr 9yoY-(yZx wӷg~ow'oaԖ߲:;m@@1 +t# Nu0 *68#V ORiu6,QԤpeT]4 vzV_ZޮZΙRXy`UB >u|: 27ï.@+zw} *|^.VsmfǓSwnWHr?@4}svd97W?bɥHb=*zɶ)`JQ Q UC"6Q; =Ms`BGGh WVPi@„mSTdS\H+=O;ĊXIuoGHZRL-VpשR0y\"Ī%:xG)xGոC:LFR8! k䩊`SrQPq`rDdKҁ$_ Yv{"u=zޣu`Q=j}}%RHXXbEx0k1p3⏵w(y猒w(y猪9U{hk&*4IA0 6qGpƩt"( Rs7PQ8՘2Eձ[6Ѥu+G~+q1TZ7toe s<u*[,||VpRHJx+x35"R) ;-R'1̈ ABepic[SʟVeRD XIJu;s#Jŗ歰Lq}7'{s"7F&h9~Yo\xd8.v%'Qr%'Q jVQ3'NGyaiz0L*k,}P&jj+!e)k+'\"Є)HWSޖd92,k8)3 a 6ҋhXD{wHt1Ӓ1\ѷu},lx1%#Kߋ vL[;!ŏB`!Î!(JG&P I,HڞcIQOxMض$A N!Qu'^;G>F7uqud5~a=  V]6Ⱦ](PTn+ɸ٪wmٖ-ۦƼ]&&{.[nr)v!T!ֽ[`?EDc 0#>}T}Xf9kr~¿QѺqWgMh^ߧ:C)$u ҭSD`TOueHPcUӪk]VQN3:S~dջl~ٺwbFҸޖULb%p>z6U;Ds. .!o"xٲ롨63>d"TD9_2`joWҁFʁp|^g|XJ(xu#Ro&2 dUjwZ#7pJ08Ze c1 lu$ū+ZJč1kTp eX[itTrt>E OXk[ ;-RP9%>b#˘X}i# PNhbFHT] `AՉ$Z_[옱hO*"veUTFps9<̺[晳5k @;~ K*F ;HTZ`.NU4%@zviRhew)k84^ CB3y> 9u(Yzm?B%u_j2}`3\.ztn².}LspY9أw}pl;1O/sPّsI/nr/s 9}Tdx^1C~a&ٗ+|5AP>f]#*VPUV[ʫص:r }z5?eމ+WIAvFKgHu0vzi UÁOpruⳙ_K/Vc, O*AgB WŴ3p"[[z:0Rĺ}\+^]Dg}+u5Es*68/&pj+ ʐ!Ha8bbSw=*]wJ'*6Ff]^¿x?ą4|qﶬYܐSlP!'&wܿyڊ*iB4D| {!hePM@;\ 2D;sr+]*u)bO\i8:)3&7L->e|R|'w&CgX: n4Ckg y-: {?[STls\^WfʜفiuVZW~;p=m{'IP{j?oi43I@" xҍԂ?͹s.&A~:n0F;EE˶9ulwU\n_g#9+CB1OAOa C1zNݾKOX/01QG"00 G !0\3!vùk|!za8ӈvYO4XrTZhNak% + W'/^]%El7* ߍvT5 @#Bh){H$[z5}lڴ(^W I - N]QҪ SRv!k \# %y-W4Rn_#Y+^z9+ŋ99lT^Qdnbgv "NzC*0mq%eJb9U:(Zpm9#1Q^T/]\ 4%F+_^tiRO?͗nO::g]~<"2$ fKaFÇ^|@˜—cJ#_N32M;! G%-d'C=m7f7*U02x:wב8t)&߂8|IE%fCԅ =&´Zd?K . *EX-:p˫QK|7"/0A&}ҁ$|w(>(e*8_wCu- nF Mt˔.yٻ޶dW3?M|8ٗY4-mdɑd'dH)vSřm,~_wWuUupÑ:CBEGq& *UG 3&Lq#;rnOV)U9gH !]7Er+m\\-UmSd[U׮QR+A Fm% l a6TϓE`+~ 2V/3era6b)h_ iKkHimKuv |] b"+1٭(zPm' Y#Sy*Jyn'gm4Y+!e) Z]j(pWj ΋6j}WDzWrv]ܢ s5qT ! 
2%4LB&2ѻWs:һ0 ۢVA\ݷrE'\`B V1"uo̙}ia1c`m<nջ}^ӑ&fFrF~U w,*]1;k@B,8rw.uhҹqƐ󩩹ҫgc- O&^t>{"c~cހԑ.w(038R:B3B*KeHk275:<]6̕BH8?z4ۓesr'4y#$PJJaS`/2re歹_gi.MGzfbe$ 1`;j<=hX1-9$W`ӏnKEP2#$IYک@"#3>ARo!%7-U]m[%lqЌ;V9)*ҭmQy,,Tv?5E?ǏUjES3NH7'Aeu;DG"׎+I\h9LՓN=|Rg#AxƴX23*:wB#rM@mUp{t#&yG밆SrZr"(}Y[BBsu(, X/Єu&aZz^J>]h*V*37Q|:%^> [$L5R C&4 -"$ NSڏLʲYƃg%ٳy󧵾;Fzx\naX~(Z-k\vtC*puj~ %h .Hٽ o@= M'\kca԰߿yGF}lxYڑO`+[^j0ECǩ8-+ !35(y|ކf[В&~h 1)hS֋LBHܰ\@gxq\Ӿ ecD|Y 0}08lPfW^ɈϾ]4P FHL@ !SGDz%x!0}0Lj3 qͩA`fVvZEID D&aQDJH~ӃPHyD!) +W>:55eZ@ˁ@ҏr_VrjXE"]*z0qj07Y^{oSRVp,9%D D 'mS U5S8xWi܂Qh{p97LMuk%&(l˙UDPj'ב~pXB/ak"}cv}֌ًhԅ(k>R8DS6B"Jtt+&fMƫdw%̆~s =CdB3q61јX`ߟ>}f2ԋ|H%F() bQ$ K)1PA" qȡLcx,Q /ԳT,h*N42</߆9wxT_{?ְ7)7Atw "JiB@b!BD$OJcu$_7xDvY!4uܨ A[#Vb/߱Y@!h[v|$ۜxd |f]y/$gA̓3'FZT򌎮J֥]FNc^d7f}5RoN  ^ =bss-uvkJ/cg-5ߧ{op1dSր(`@EDJ!SԪmn>G3V1w8RcEJk\Nc\ dͬfI},|[Pɩ MCNĸ8_@=.+Jym \T@zk'g͹E}J+  S[Bc }tR @!;BfyC ޚ{eJ ؄gq]XH[C\Z"C(w^?;9z{uF8l(ۻY!?9̃N/?sޤUHeĹAĉ>-ѪLD׺a]$-FDzNJ ^` sϷQ|zYF?Lkdj[GMޙL50y)wc6bR 7)$h1gDs(S8`J"C<ς% D( ݣ8b1 4\qHH$m4|O>fun_7|J\y O)x;W'$=MZf,G*oh0Nj?.$vYBuTl\,ѿG5%Rѱ@mo*zd;v^ jw8I,mcڰº̵Jr6*$id.|EG<≰WzyOD`eCUXJç2Opi!PҸB6BƏOs68"ݞS18^ rP[nK?cJ ?{vJ@ 5ZٷiʃŬ{>xmBoj/eb\ >Mk Pa8yWaA ? tfKX͗A2%‡e:i xEYE>x>\G`*(pL΅;3+nWNӴҔ_Zmd6LgI՝{<էp9?!`S;\}w GRHڧJ/>7TsY]3jn 46;kKGMOd3 8AN&z[tV v4sy!ΰ0i5֘6cǻjhDiKsGTg1Niِ[]^:ֿn|`{$8z5'M+,aKа\ݬJ~(j0/x cu J;Wl~38~#eddnm'Os5=-z}8Z.o!W@ozrf`?ȚjMCPwVYDm-s++7٧,}bɓAvW $8a1 80ZHKԞkaZW /c~dz$ᄠ_,ʖ Tޫ7?VMS,]Sc}s*ME?_mx0 9ZW) I(8JD2FHL(J(rKH r27۠xZdkz76LoV,U3 9ۈRƈ~vQz-njk;-OnP),;5n<  x37#ozf OlO\l"xׅJ8uϥ#$!€(D 2URWI>#=ۻc2   ؏ULE (yF UU6ϲUP%asT= ab_ˊ}%6w[bV9aG.e"$kQVK꞊3{:"o*əc/kc \E`– zo!!Zm[bǭ.\&u;-y.3y[md+Z"@ޠ=z]ٿ`sЁrouHAdZn+Zpw m+VK1.:rQ pDzK 7p Y'\}5.#Ӊ@ipso/!D4T/}V_8 }PӊFP+]PӠyk+5ʦڒ*03@d!z1-= ֭b㉆NZa^PP./먏Vc_.6,h`fF< hCDؑ'^g(Mo}wDqݓ1KⳢ}Ig$󎄽ww1ܩ;g^EzU_?L׹[π>llB`S?1m7j81f 1W~gupǃk{'O5 9^F똿M-c/ jt &aBa@0M@ ߭uFp\8SG`XL6%ۃmeW*mUi;oUU6oSv; }UqsC=O\Q3>_}Kgƨ~ڀyw0}Sz "*Yӻ+pVƓ?>ZOq5=@w;wf22; 4D6@^L/=gLF!Ḫ]]cK =X'Ty,I-= QGTtv(`+Qd)rlOq\;=XU QYJ$h*PB8<8lt@*F`D  Ĥ#TpO4TjUDH—2``&C^ROVnF@*Q&; $(TB:.2ʹYݑ\+%A;6 p"$UŞ,FpL.)Mi5)׺Mlc4B)+, DE ,G; $)3ł#Zg lDD" sFQ pBuI sXR~!hT,=GN"KKm-(\KIpr2"RTȶà5y[T~a< ♻sw=h]d\~{4-lAu1wO0RKNQ^N.>gcDeTx'?M/>4>Ŵdڰ޹ p٪G=[#U 88[/I"טӶ:. 6m 4)J&dd].X;*ˍL5uv':M&'z-FZ,N*ZR] Z*sjeJ!Fy, kLB2fi*ØvG>0d6L$r`0B$4/E=,ALb \X/Bi gd=p mj0UҤ0*)U3, h]vδnKFbFJӏR Ǔ ry2tgYːhxp2жl6 !|ݚ.SɗI@1"5a(]YZ28n,lx'O&Gn,Hlj›yHnzW5i4z̷SÙ_aRf)\Z M1ۏi*\"ҳz;.ְH0+z,XnHk./zi * mI|A9D'Cn/HR-P@qfpπ@ LKԯL.^P*Rt[UPJRuV:wD|/qB[63rMG\k([w㖇bI5b=k = T˖^z-ғB ]2#=c #M1Tz8ny(C L! rʕȆ[J_Jd3tr/`FK@`mV6!Z]=j=Tme.7a'DK8?.` Ay֎EkkXv^0vLh8ҭ,"kJ+? \JdKz_U[:=7!<@n 6u\?zkFe[>_3*᢫@iS@rcORV ``R3#2DwK1­PNh Ox}g &K kE6*}v!W5qs;1\WD]vd/ٜM4f|ԝh!0mqt!liE)󵤔F9Qҟ&K]gW49,/U5Kކjj3>iY լVv-HʒUpAJ@hZosTtr9߮QJю+wn'M8*9U?ܕޕv2NQd9#}"c#k/YJŭӍ=Lzm; ܖ#\ RpqyniVn8{R^kNQ8ԍ}t 8T^}sZvo9(?Xlj&v* ԛNQЬg4ѵ+~/^%GK] `y-qLJ\(6tekbB̺: [Os9pNn; ; tlԆZj@I?QPP^j%>kM´5*70/)&)f&*X{5m_x:$3:tGM(BFu"ot{oFfJarܥ? ?+~٬P 0ZsT*އy$#`-a:0>&cH-÷Ɣ`(amǐFdD^󚵩 Gi jq,-O O;@[-Xc6㳙D1RjE='r֛$ʖ Yy9.ģ%+ ]p-Kay|Jѿ}g 6ITKrnv\t( NhWWB|GCT=>߷oe6#*ozKT1kp_~:;)ByD05U<80^kr^4Whq^sš}.CJǡ#Kx*岄"Y?x~~{v*ijvřՒ"{TcҁQysQtruLZy5tĺTޒXmگIꠕ8 5lڕzz]- /!_^#=3~L_cmC|(ϕ/2yfɁǯ,JXd 1L~͒Wm4a-wfpEz;nG3 4u6hZ퇆l I2^!!m;?Hm\Hs%O{귀zr]YƶHb8x?~&8Hf W3ڛūoR_ֽŻK&P^vM=NOJڭK`]_[GꍱY;. 
[3ͻH}WͅyV}= 87t~Y\؈Y.\vTA,N]}o8*.vb lh],@"VWv-m %Q*[.֖hjPcO`E1Yzz|g^dqB0E+X-+4&\E`n&D]ɗ']~}tߙu&p&!{1vz!D]8b]HY'vFAjޥ$\cɩIqye'wf5Ns%܊T&s=EmDm9;z ۂ Q2{Xiu3EzX=Ci6cY庬+fv]4_eL^r$ZKPi>xu2ZD>h-B\N}'vɸ;hzCx4 >icd 6jc:ri(svШܤ[GAMMu[*nR|IˇBJɨU:ʤQ$%8,q\q1Lhl YbaȐT_=ZsɁTq]eyq1qtTpxv`X5.>4K |0'`ꤣw_G^SYO>6}G14-X?ۣZ&Y7Fɇ\Nu(+l Ĝ*[% *\Dt,.#%Pfb2 :zx7:KOy*Hsw>~vyO?|sɏ/_qY39i~Nҙ~z>%o/xpex},^zzNڥ^o-9]`>`؞y|wFñ;K :7N^}_^;3&g7;cIpǂ0UZEb z/wnє2mzь )hVa23ֶ[S&ܻ`%/&ތǣUJRz,sh 9aF]wwC҆+1,ީp\$H$TOԃ/^Ezizu>\zGܛnŽI@3}>=3ݡWnl;?GT8?OUF.GzWAPt2f>ĠEx90+P sYE$E!zvc(wcm3ID.[3ؙ~`S\;1D T*UrMK$lHGqb־)AneXe&cFz:0^Re>hKh vu ș EZK0H95/ 7^?: lV-ju>bV W!eǬ8dAozXиxŊ`1)3qՁ žYtooV܅Tg+WDžP\]Bv3Yj^Nl{˜b6Q^@ K2E5jA ,=BLX2S!g7.>-Ch0{/Dd !5ŵXۍ{gnǽ7Y/b4eƭڧM޺lFϯ~ ${*+C=CdRZӛ{Qugxc%{Ρ1iR-Zqhbm%˖;3T* 쏣L9pX񄣅hJ8JƵeVlWr+[>M[qs?\\LRJEt+7lsl7Z-\[tp a$"yep.6y\g5>׳)ks}O)%G6&+5I5}!fZӬ[[ >($Ӕ"}q 70oLD-9? )$CKHsj[F8G8h6^MҡĺW,7:M+\7G'odV+7ң1֡3]g~VoIU+Z8T+JTdBN*@aD"]Mq\CW /4d/Ї\ kxN^"D 3ܝ=#KD'y:Ջ; Fӝ fySncAYy,; IE90 p8Q GJ-$H1׵jU9LxuhTAhHINL"D$ ΡS]"?eQҬBύ9x|G.LaYNQ^$i e980e/6-6iI>+ǽ.{Ž82ӂZQE4(C-nqb 03se)G+EXXK)E pDI$T@tqt_%( TBxusѰߛ9)բġByņJ"SQ[Ǖ9: 9d`I$#:,Bpp#<HQ FfI'WH!0N^eMg||Qɪ99n[*b(ޒ9kN"]k#EL;.ЅFx2PX!*"q.wj?v"c %lHP*h jB . 2h*ØJ.1 9舨vJ?Bg>y :i;E \K ˽)=,9Ž)7zSmJ R/G6TKXi- JW E A_qabCɶ^ er qr[lK01Lt ќ 4XdT(i[L۠r .>'{Re Zn U8$6;p3-iD%4mTnjWrV_uJՏ Iwjb*W]5VYbkWYXVY5^%zS[Y +]g5ﯸد|eQHB˷VStv}Ba%M7w BL:SPI@{)*{2a\İSLH+ NDS৙qx~juo4ݰڈ`K) #EF:'#'L(8wzpY2gh LK` c+ /yl__rۡ$`ͦBsqLjnb9Ob~k>NY9+Y%4n+hߗ<kɎ 1f1O;̌ڤܰZʝ_j8{ LPͮ!Y]8WՆ)X5~ & SP/ !Y> E벼`KVp o߸jX:w a˦Z)OP0AV7:mOo ,ZO>%ZV )!UZٖ J܊J Yy wT?f$Ik LZ4t++ˮ f4ƃF8~4lCJLgaEs0<~{nReϤX ӧ],ϔ#ɭߊK7FTh ua.bþǁ 4K66C"j;S`Ti:u^t7#/ݾҚڐw\Tr-"xxQCy]伌zzXƱ"zG,.ZOT*TszRE.ȨDrSw;?Fu,Ib6GUV=wB Wj[a"uǙEȞ=m!J;iA:i )>LU"K^fPD8fa0H"  m Cy(q*YB|*D[iqA,Fb֎IQt@(D!!tI0+6ZUY9^14%j=GWfpYA)N9FIvSrABɐeG^PZ"%P(bϙ&fbWyqOFuC:@lGӫ.Kٵه9 Xc̣QUM8Yt BgZ»b:KF*+»1lJ`oXtOQQf*|䆿"&c"s$LcL 7yDMcʹUem~Ny_;L˪4T/=V/9I,JѤ&I 9Y5MZ7g4{q7G 5릝Oy`No=Hdڣ՟c'IbbTpN4Al~Q$h:2`JANPDՐ WpX; -AX˘V-XJj9unג{hfJ@nzhfsLM5[ ZocuؽVF7ƾP ]76ryq.Ww"Ö#sBb"?ylh5.=w$=|q6D%19mx0yGjR7.ϋl;9-Xp7dϔypN*-m”oF2*'\Vflrvu#ovqQ\WMz;[3J; =g~+ܳpDQ}yzA!PH^ZX^0Z eNǣ `QJO^=yӱ1f AgMQ^\jr=w !nR0bJIS3i*,g"5eBj4gQLy9.'t?#kkv'+omm, p'R T$<ƨBԴ`U֢C,eT37o"sBgh :+JRsQ-{S| ] Ai^U,/rgxuIgL]VipK8(Ȍe-CA\KfXQUNAcM.,`6k>SmU%8oun/ٷt_?{Ofka]Թ7ymUa$/rTSu~COSY\@B7݌:LlZDeNX$\˴c 8W9?gV1 u̺~,7!#eUb5alu]T>fu=1?~nrW&ç. (?͝oÇ+ BxAHf/WrVx{Ç+S~oܦ_7$߿s o{F !(lߞ}#(,w[ k^5.'JnsOS _glOĝ<;7R[-P!+2``˪gPgBʽͣrۼr<?\-~ɗkdWGt"Y%˔t=r KL! 
3iFh:D;BXG%-rΚ!y0ٹ9ZDfZrjP=S(g8Dw]CqPf8I %ePT T+%F%bf79w NIATEi&q.M2x/"`p/;sftFVƌRKY(\G"VNYk AOYnŹu  3u:v˼ќ7ng*H2N'Y-,ߙ7H5)i;M)jK?"a#wʲ>ي:-^phyIy}9pZ:!5˝ױ3sdĮwfYX*,I0w۷"KS*,c]V@Ar%D {9-\|0zxAvltjTKc ]BTJ2F64uXKT^ Vqad3$Pӽ@ 3 u/ZtιDBڵfF, ]?{*=dг*{D7w iRp*<}p*YVM2'.px|Ε'~_6H382k ߽,ۿ7Y𹃎ߩs:謿zv-tZSjX ձwÞC4@; +.Enq|X5]y퀱Dbo3?e o r{eCz"~{7^>^( _WwӽX/F>-ҚG:Yn靺^ PaklO$n@]&cѶzѮ'4"Հ :v3EńΘ[^x^Z@3FSɮ(s"%Y79TAUA Z&CJ u)rĦL#6ITu: N >5$\ TfDRhx.$ZQԐTSIk)m.ʵyxxԝ%Tlꆳo$Aw)eXDQH铇1P4TWɟf/]x-%dP к卻M͒ դ`=xIAk~?} vR#+de2loϿv %4Zs؊M^$,(փ;oťTSN<]P?t]rGHVp{3612MQ '8} G̀UKxF \:y}izbb/9a,Hhw.뜛qR:iqr{fk6MMwb_L[^ 5|c$>x>; Q Mzd2|4Vөm\Ƃ|2i2VgD@,5?S2cXLf' Pƴ!,EN#$?$6 ue @f24@9НBdvMV'AIh.{*WXd%%B^o(tx芡V4[Ս3J=Od/oW֯7` voO$R DOу *H=i[ o?»C B8 bm=^3CXn9tnVCLޔW^x7|cАx]#j~c+@^*af2-vn!,Ⱦٴv~}GQCĴ찰ҮލJh4Z?jηVT|QDuO .6"#2U\dsWiq qEPq+)qk\4{ʕ2{rpS:!jâ>fuvEwĔ+<<6 pc\RJIe(Ow Pnz1Bl!Ŗ_,h6 1ZP%Rޙ~w?=jA5mz1אn xiiz{Rgjʰ+qNLB-Xz[P-Oas&g,7pe3f&:C4{Ic\90@m`Y1rZ=*$;^.#G;db d׸lj#ʡ]=_ _[4|b%H6zVGcgpɕ({"*zn4VD[tF^oDYr' * 9O~\TITH/msyv2u:BN(E1TjYFV@.kgI.*J/U܈Sŗys/a{p0)ܔ݋ڊ# cJ\$f.Ӆn}+.fqq*8iY#턫L*s\+n.s.-_ŹfǚLd_d<S^Ykh7#[FEB&RTID `r$!+ZݵZ$S> qZ"M0c,A%E9sʋɎz>pW")ӞAԡsD[RˤE}}ZlY, Y`a#"ʆ|RckϗWoxo}0z(_~iEHH`%VѓT3,KMur4?R<~hg xda )7؜?zyz^2n[& 4'%ǥo/LHD -Q<)G(8&z @qYTHPٓg.uS)B-)N 6:GLaA92Y;"rtۖ |-v}.;FN)p;8w*F|h)Q 98Gqℓ;LwIOiguDi=#)J :H23ܦ) 49O2,H[XyNmBχشr 19|[O<e6|(rT'|M7@͔ي$=] ZmcNdD!_hk'=Dˡ^" dOȔoRfbn0Sre3z0|L#<P+M_2;2-ęsUɃ"JG;S2xݳ9V@| bdBSz0xSB>AHGɻPSI0sZScR 5ҡ/6CD6h5*%wJ#"I삎$L;/ mMC ,\I"BS7(uez8_!rNf=`YĈݗj,O EI/ CCmÙUK3j/ 'όUjd])lV`yxQ' *3 jsǖ<ٲ;&a#!6Ɍ&,~i҈Bm@"|BlAY"]T%{p(M9pɸ2 F;t.%U<3j[YzJlPOtuot%]kء$4;}77!5/(2E w @;rN^M,_岺@r)t (]xH@9!XgV̈`g~fusۇۮXę#8=\'3:}"k^kܰ_<`sy]ּei8$(SJwm[!^ 1.T'rP[bc[) ?.ݯ^d ̾HIقHt}~pf&[{>Pjۥô"3' 7of$sؿ!XY|a@0ۜyb)f?pϚ밓oNTۓ~Nt oOޑM.o?ֆN#疵ȟAGΧo󓉸^r?9?~Nn0nRSvqɀD 햋\y#OȭQFL5DJ%%֒ゕGsoGh1oA~&Q+eQ2 K+?/F٬bg+W츳JSTEk6cZ ǘvnۅΜ?AXkgJ?E 7m|m%@{x>xPcgWwLRoWnZ$Y¡qpl!ocr׷ZY%lٵ[mˈ؁RτtZd̚ R0Y8@Q r1QmT ߀jftHRGFۗ.T7FwԚ %`Ȉn6WZ@rd7c_ob=듩qF$?sddS0RisC?jDfUnHz7w>WG 1)F]ʒi"ޗ7ЛYUNo$?>g~~Y|Q,(g_3/fgOF{{f "Q1* L;b%ųqC%q‰7Ǜj]->Aoz'ߒi4yqƓgƟ|#~l𵅚1ʴ4=caǬՒub@y 9md\\HRKMXkZu`"nuL__*ף}8.Q[ӂO<1w&/h']dAb( s* aSɍ %ItDaTdw1&"%K;/#Gǜh*l ̅`sX Y! 4Z֒-x̳"U&Jy&Is&&JmLAQ4}&XWlx^ f9t&ā"dKVTx)\HvȦl?[:mAd4*'?*Ѩ. CV@ᓂQ3l`"d]FY4lG#jЉnk~<1,ׅLN2 (㙲1M rTR͍ɒ\61'!SrY +dڂ`1mv -|(s̑2G*fs*5*Gq-*_@KdIpg6PI|02qe]H&CSE(}xCVK֕+^E$(d\)T ʊ6'5'HvGHyCa>&"iؠ28u}E\U'6 G4*H"unǣaaT4),X;&$nxVeFڶ16Sd}p=Uy?O#O pb ` ?MoTYp}s-g+vIJrRӅR0ޕ HӸnneضYfXǍx`'Ds2s^ﷲ1{Dȣ7Ein6ɘew: +*Nj{EVgn=_3x{(1%o#YWdTb =S b؉&ؙ͇˖?"MڕhA̰9cQ f5;RT raf׃'`e6>qj0iqP|k\ ]iqޖk6=X+d`NJܰdx6 STZrm-54JVde1%.sPG!F7PT>čUŭu˶r}-[yc/N:M\:9\9 7x,p#(,I jcBV7`zS 0( o2mn^݋Е,>~$ w\p3[yy}~ X4C29pOoOz?Ov0?Чx3;w3tP\Mv8K6@8O]  $3פo㲛Z㸾IY+x& @()iAiOE M99$g~mb3YYkor0O(j%ex}Iޟ(|D-1>lvg\}kFYcZBkAH7"Z#r5sH nlv7&&ʓw!' p.) 
JVX/IW" a)Q܏c>EP1H::CBō ’Laō4ǽE.$T &-(6d%32(FFBO?J_1y$9Ź-9~ 2\ | E2)Le& =1$r&5`БC36>A~dV"ixPIϙe9 ;64Zlp,9y ʌ]Nn uY:GF$EPdd|Q{PIOL* )%g75@P73lcv^mr[612@L FS0tdtS,gpC6(.HAI9\Nꩴu `obKcf[<J6f14nEӨ͏yC-Kbe:*uA H<sH{%wn'L0"x"-tMT*om_AK@#W/%E\ڥA$Rծ >xSsT '*J[{2Īٔ: 3r_R5z֔?u*#ʸ[7Q6?>)g(7 7jWן h#WJk`g~'LJ#!\+FYgtୠ|ɀ-D&( ʑD|bʚ+:Snf#כdC5(/FCY(EHl yq# u9˖J/)n  Tsgnڢ/Jȣ /k%'H+<%e5C+Ekbe7` hMKŰUɈ,y=4\\PCUeQ"p|*2V+8.V~oV_ I~SY7DXM d+ZnbYbIVn\^N9..2c"Q0 ≒OZsZZg`IS*!ֈ'^Bj%ӋB?ո x]HRoJ3Hґpk9*m@G'=WI0޵q+"Yɼ~b΁8S͋g63H ߢ4-uݭaأՅU" #F}{EuQ!R%?@+ BALcV,+jA(fQ3R918YiŨw^ b A̒Fw(Rh0&sK%"V`D.IFchDp"Bd&AM8]>uԠFyʥ(Z  I&.Q- 0DM}:DQD!HBPRic:T9#9j+Ip xڹ_I.yJDTpMyg Wݥ.tI;X!A-:T+NZ ۳C) LA.^4Ҏr G⓶\#sx0 )OɊމoTѺiijWkֱs(ݴ1V"!d|FS UiE8#R1]vQB 3^mKr;χ%9neEAqLq%D?=)XXwr!k x6DtJ8'5a0\ t_8Cq8 5j.layJFҋWLՊpIS J>6+2DodN&wu}Gv2ɾ2~OF%ކ0u]uuam'כIߧO|H]'vE͏ap|}H|ۇa9rK{{d֌(W.^^~w8ގ}"ڍr]gLrZϹt|%Avȃx:ճA7ÖJfYKJvVWn: iBiӪVuڕGQʀhHf^pevZi<7C~Wpk7 ƫ7fq6–=#iW53do/wkDCS GqS-SgZЭ}H%\9.y]cnzc%ve31@KTǕmz~ 7=2ܯ_.58qm5l6;M_?|?&ayd󔐥&x=1eOΫczrH)5C],~zpVϨtQ@Ƥsz-3UD kmؘ2yXA,&XxXOxT8Ao+k3&aȡ3)x]8O~;|ۛʅ8"׋<8h &' p\alp:{;Pobmi2e"o49oRGlR9)t7OGla njRd~5_"]QCnNtp 7w/Su cTs>֢~/mH+Bpqntc\c\ᦹ5FSG%=q{SՇk}yb쑥͈>s0{+PpߔߑYӣ ^ ŪKy'ZR)6.`aH<>Tyx So{ R=H\_aw?]][{It{'|rsAB4\]>:jXKʍ'kbF#1e1tY=7XAH5@jt+Sִ#@ʀ4kdӃ3wlqf{\;jmO6VPɞu\56yjG^6z ]q06.b x@<pjJБY<4uq# A$bѷ){G;F(t:i^?p 7ŬjZ'a=s?0P32?n Ϭ$E jdL WE xd:DG)RCOQ&Ǽ͛mm$ Fd6???yOr\ӛ?мS8(& }} !eýxG(VvF NQ 0FJCθDF o.X\J.]Gk J ݼ3f H?CF!/JpU#Mk~ 73]wM9j[@8`9( o (r<8&4:0Z/lg~֤buy~'m(QF) EJ3(vFpD˵LKx m g39]*9.iooO) F(x+Dv&Nu! %Rx/9V+_TP*u2|~7f#(|񚕾lx%{2ΥVNY&ܬ$oTfF VE:`E#[s6׷X"%'FkJөCe/:z[zѨNv\$jvF ͐qtCXKJQU,i#p;akʆACب1tp-k68/e^q)*xªG=QxJuțS4I eF\*gbLv'N 5S{2upcoR5I{H{mLgVx{3-Q Io,}\:uC%#/^qgHzO$:߿p=/ E0A! !+%_/)z{_~>U$E%0W]-TXū|oaWi}*pv}Q'QvՅg `8^l߾?cKpgqOvLibJ- u!4KGޛ,7:^pjN5vU~ RM$'wտ[[& g{xW/lts&vE&d.&/]OQcAqQBII_RKW0B\=t-.4 aE!%C$t05Ǔʒ=< -iΜ$;(ΌCg-Y ԃ4SḝZOp!Fpt:ۧeؕO?KqNءJVmoX @ G_D)ZrC}ՎVo1x? 
Of-hb}jqFH4LtxM`=Y-ڜ]._ 5m:v2Io; n6Qгa]%:uPmnXogJ"""a c'A&fc!l}-Yjb_3c$U,VȡX3Y9/~bO~ )>V?e\4V'$db-w [2SŘJL"mV=!@Lxs`G޿̂N8@wqN%WZf/"ѮAϳ=)R7fyJ)@Q4O׳ ,FeJԈ=D\!qNVVYJ(-D-Y'g\jE꾹=!(&T}Sn=5Tt-]yuS˧{5`Kw%av+!s٬*vxw%_ #;JL$RAP.XIc I"ib6+ʗ55_)gIR(|2SմahRn﫦=ltO,oW"@n@( F3cDZeY)uš҉ Ԣ,2,R28#ٻqf/Q&ҡPŹ$"PƧTЛb* `."&5Z`% *123NFRͭ5Vq1E2|@_'q.'z]| U|>5m>(J5nmamoE~4r ҢlBq6^*7^QWozm* It{"cJeJ6RV8oe̱prVjƱEqdz`CEeZ1T)}4<=KCw7,4Vm%0ƨ!QA*=#+eV0RAamJ,e^%˰ @fj4#v T6  2 b0"_t[{ZPP GQu" #C -m6 {S7ɜB +W 1V+#g -:CJB=#1CHs ;TcgSؠ*S'$I&% sfB?qd{$:(`:0&T,̀ʥN5,2f1NJS{pFzpj/q?;^.FYj^LQYJfaO)I 2@J(IrTh`ˁl%PV c8:~7+Ƚr3$G˻׈~X$p, -eEIp|okr}o~ށ͔=\ "bx~]4t$h& =.%ٷK/;%R?y?]sd(N۽`^J $ #.QN98(svc}vuH(|ZJbdS+%/TuT1=a ?ݡ~p;=dqU!A὏w!+==U/ ZEmn^: y\ ?ͅvZYmЖ Bu+ rk x _$ҪFpLlj4܍*wgqKUG dbL+ g?dr5|;@"E ab,V,p {c>?Yq _pdZ){~qjnZB{D#z}dz9"-Z-M*죽$t=ȹB%:  )ԞԴᆳ'߇[SdG#;{.UY:+9JI:4}Νܱ4U"usnpO8;Ѐ)*)6d2Me ~h8~o<= 4:]ctFhvknECo$\)2.ίaGS.{vI ᵒf(šb=T];ɓEɶmK~ėƾOfѯ8DW8H#N:A`dUoxiQ[+kZ]^PImO^pǣ+;дf¢[$\yaja/0Qp ^$(4:Ja9sjdT[.Xl+pfh^NU# g\(Q&D(J"f]f Z*TQbLiT5{M z]ͺӘYBS9JL1\f!j׾@EJI;sea]iwU!!v<#R!{'0 >ǜy婆&J3ؗhY+wKn5Ԭ,hI?ef˗#(3eRPZ(&GbR bVH`.=X> P޴ܫ~<>Jx9r牯Aqc"3 -.d KEDZ3Reyi)YJqqZ?^_g%$*?7WWuzsN %0)$3V;|YI<=_߀b7`܏gC74Dg kĽ7wn}xI?XK;?h7M.V(rX$U@)Ft˔:v-;UDZfѤk&姾}:_a|QdЪ 2T5}YY0KX#_pdUTFJ;e*;.T;EXbYB{]7Sa?S&;0|``=yǠۈY2Tͳ,^b?Sicr)09|}VaIj 0*8/)ftgb++Y+Rz4o.+z}|3MtQ]g?y02d$޿dzo|evjg3%e[_11ξZSdĎ_Ѕy6?\{hfv-zhQOzt6`_//{.ǭ ~ѽ&[q)Jt[MB{[,!;F֞3TƤ[|@VqcýT߷[mޢt ub(#3ɀƤ[|etC8FT!t[,!;F֦DKԜeq"PU!oE)8x=},6+!2M$0%f" Bn=Vɥ l#0_3upذ?"E<uG 8&A*1 OG'eIYsdDygqWgjlVQ#sKl㓘Y#5~)`KT'"&t߮ \'r[7&=RAT/0,-t?}v:1zRvFZl.q\*̪'gdEyJcU yF1LXŰ4 E)j<-(gf]NP7 O)SލlP6Q27!^?%0=_}3/ǎY/mRgiR}x#|+|TPM*50l|(ʃ#8Rwcգ@)]DZ2X.\A {\#Jkw|=l=+ɲw49V߬y$RUK̤O8lt㼻{7 n:^HG'94s@b gtHXA&IvILV]Ԓ#Bc t@Ac^1SVLYFQ0TiT0 0mUi p !!q(#.6qޡR;H¿2SfF,'_ -%\I @WE SG2KJM63D!#QbDk4& Pv.Щ B:?$|wv`($MJ̑bIʔL; T!ŭi: B4ɏ_:Xg/L/whLNz' {0`Njˠ#[EUׯ$?A1Ϳ8?WYJf)%i%J @5^bpZ0uNg&&Vb9\R.h"2lt%Ñ8!&Q vu\#"rNX%M3e6̥8L`N(AxA5P+zLJ#IJY ]b=X BeĎQK{QX=x+KY4tώ-&DX BX'v2ҭE}@Iic-zҭ y,z $ ͗wdZv&<@I%PDYMyqcQ2`oceY6.l0rcL6|67G*ːpQs+'iT^kPX2$)%>VYG7Nǁnm dd#!$2Hj'!u{aْaե0,>E^jʳuv  grKֹt|7@ZL`pՊ-㔫 CQI% %#jpٽ % Pp+0 DQiPO1t(!s/HPEsXTB" )#uPx[7`9NLj$)'I‰ eB`L9XNi+5H$zQ2Aİ,keJJCD iA#.ъ9A2JLp3LGEBK5k=e=b_/< O2_pxqNnxծ|+pd^Uf3^RY·4vR/v٪57\N]F$˝YaSa`]3}m%L/ A׀f+BS~Еr,;靥LzgeG8s9#\4_d `?7;yQ۲z1z3Zgla m+lP)N\ dȌB[m@c?INl3%gTb7NNwO?'WRYBs곾5WWJ{Y^d3M>gɓɓk AfY,%ǽT9(Nנ-uNI g:HEsO5b(X `V)|HJw%gU*q`{ǘGY)>eƏ {,< poE3ѢׄH-itEE.#0&{HR %8~FzMKAמIgBx.Sbƙ(v8.KnJaL@PNLlfNb[NvZA96E2oI4\^RWBɝLTB_~PDNץp,iƟO&ksz^[?J6uCCixbIEe@*¢lxvzZJjyuM Q<yiw ʝ(*B-%)ȀT1WS)zv(\gmhh_JBpT$D݊ƃaDP%=HB[*I)\&xe_*>l\hv, /tsOion$iAw~0/#Gow<ӣP4H5ƌV\W<8ލ&)BӋԵC?ŗ?Z :f b?WѤ3.u) n}``M*-Grޱ?ҍ|!@ ./.9 [#]<$ k*aֈ94 $X -^-zk@D ʡFQL3#(`œk!3Ve(.Wp\ѢGd3A Z軇Sҩ}|ݣ U֤y;#ԪVmr/dE=}WlA&"`xJ_}?-7wo׼@xR^~6_ ;qo0@NG7o:]+NgDX3`Q)o\nW~0ƱҼvťI5 )&}sA5JUSBnelM1BPwX|oqwſ@b VźAeEԥ KJѡ0'Eq5LwqepP}_WlUᆋck/ws.^V岢!CziV'fdf4k3zZe8/=ib BpIł DNeaɻ qAr^p.Q3 ZY&pڱ 4]|9b< :VM?i:50k:w>6welc:6%aurR5ds)1SNK]pJ pMHDy8q8 rI-`in"ÀP6dͦ>㙄kJp^QJέs`awi5dZEw }lrA P E/RW"0T@@Xse%&`܂ Eh%trZ/0/f_(ko2ܒλAEQLTS3pVQ|](4pa,_t セweK!/%v.+3RK&Ud\O>@?GѾ!>ـ)FuB%k/ǧfIIIU6 >[3yiAD߼) 00^.d9z S"U I=[ -wz)~S388Sp(#8pv hdhԺ{Ro<ֻx T>f4y3oz a ƊgIg(jΝ2Kya/z1ҝ;,p4ԔDqBa   ŵ+SA Ue!ݘBJ Z"Zā0gqÁĵ@f'paT`q !ȣTōvq,j#iTNiX UzЉ(,TN;s?Qv]%ggBĩ 1 E\qEd.= , @ ig`#2TKKo% ld מ gf2s-S@8(a\VSɑPy}rFj?yͪҶ&qM`!.v oJ2l#R-e'͵ne x=$kھfdudi ݦɢ2Sqv*inWL-&3;^f@pWlx.ɷs9l1tS0M;_F  ?+J˵D0M"_gu$27(xfbCI)ULNǁ(9t*(ʵ/Li'5%+Hۢ ,k$G#ioS4,p _,M?,p{35Tӽ3p7m` td2FAhI#I +8wDrs'o%7f;fk)걁n&+*_q&\Gl DŽM}kK'*q&`ʢddrP3+5Oq68cRnbBUI^wOQev+P˷i-l=z m\ c!A3rIBsKVh1!. 
mKkbk}~+IJ=odj=&&UGm7.;(JFbxB}c6@Wܨ^;v s,pY~đsJ)iJ2Gjx>Y8Da;?{ƻ]IN QAIoi&¸Å\"Æ\B1f;yPOyFXo$[V$y4W$YW]{0.Qxӽsq7-c(A-Zci=-DɶSGF'ªC-W=tj*{- 5Cb޶9Zɾ4BIj~/wCFgtE7۟)-~)&yx_>NqधFy3;Dw 꾑_vAnlгI@FZhӧo;9VXnTYZ.1` $ D4Tέ *GWaV8˕! @Z'yodE$cJ'd^ljL$ Z*^aOS]<Ռ jl@\*Y .qQRf}I/PXQ_4(sNymV aŲEff%OɌXB+N;\TV  Z~E9$Iad9edہzc6Ke9/ؙ9в(QϠY;Ź/Z20r=VW([݊uN:3C?&Yڠ gGۏ k4O|2>,$.].Lg4co$.6ftxeE~qdDdfB}SкxA#Agy9!=q~E"x,g!J)+=:[W.E2Uku.9i.jۯz%k,HmQ<5.Y>5> qf5nCB^h?k\潀jpm.NS@g Tkvn_MN+H`Z΀ﳕہSsѽly5&]f퓎0_mp[F6.//vU~Y\@T*h |lA "}y1`͏P0F:#3 Ɨ$e A3{1 x9WpEtGL J1L\d\y)gTj\@ym  kzfOB cr0cZb8Dk=yr!g)m`D8 0vey"Y$s'^~qG SG3y37GIJU'mgy_|&O2'Oҥ"EjK2瘗I$x6 T'fE4T[sXKUkB3%"M?a\8 'gI/gM_[~Hxʆ z;m:5L:5L:5L:5rn;iGxfa7ƎCZ{n"_r~}{Sn1^wߎ8o BqCk2P3P8( QHhn x4?i)`3P!SG  }lȼCQ 1Y\*6 )Âk(DLj5Rb7uVFz&{t|Xi%{a}^aNasBKObt¯ҬH6,)a;b# 4,.8*p`o0ڀH=g7ﳟpSu}E"?(n5(%X"r6L~:ϟ #NOxa>P F"*EU4zQmyîB}h9ż3Ƿvnʏ&y/"lɽc˄xN)"A¼vVJv]cweAݜ f}y[AS_8ŌB #Dp݋w2-}pn&EM8?PQ=ՎNF+_=8#7Ju|2$g'i/ZGuHuӏ һfgw8װ|L4BMJt ft>tꄴ+@ a*iC^.aP&@͈׮odSk(!z齢W1-@cJsY$DR@5Cb[Ӛ}ZZ@K^oeH_"?Kg!Y Ѩ}ƇYlAB^׸u%:VA>u;"P\<\ֆr[Z / _3¶K,s ;A%' :fO!\Bq UrV#ۏfNKqsre6>Fj[a0ռh6W8ޯ١簁)BS"űRDYMq3#OEJznl{{e)*L%BQzf5Qn'UfݷŮ{4aJD[ klń TtSc>mՎ &!$s.D#_V;`^{#C)Laa:A,Z-ՖgpcxRY΃ ::;Pu<\5П,_ +vf dncS@8N+Mݢ]olO_Ըkp:\oT fB.Tz4Eb{f$ئ RxWez(# {uPҌ)vw_ ӛ>.%{L(Y;sb? "&48>9g"RM&Sն}܄PN8VJ^.)7qJԫs S)$[{i5aKLkxwl<׷08[vtSEdǼ} NB$y8)Iexߢ\AQȅǗ Y eT<0'T`X/O֌ۊ2AÒcҚ(hRs… 9#"rψtR)4渿EOxvWIJv\{V8)-+ή#_||0E; h֘RrrY/쎯xL03c1U"ǑItN` ^&Az1:0'v{QMAW~:ݗ. iNDێf+ߦv9؝}KJ ̠\Y¹1G ;*\ԖhjG "FZFPbJ(KսJ]ځr¯b*&GtI $Va4`bsi&;0EBy#cygNyJjl.rέBzSUg'8HQTR'\Šak%GC8&#RH)PDhBvJPށ{"S-9¤±QBiE5RH xP Gzp *DNE4+ANZi>xN"0Xnny.`&iL yJyI5 Ҭ xBq}LC\1~K1ZSMRC!ؖ/4vrHR|-l*7O8n¢agM0+$6Diӵ,{j*90nT(M)@rR‘OJQUw?W`="^McE4Jɺ"(G.LfՒ{;j5,Ln̸T:),W^G?jB#dKv΅[HDqD(%4W XiT%ucb}zd'xg}s!yK{uۮOgPWQ㲦LŽٔ ^NzvîhIB]KBgAqA> q I kgto7o<˓sB]޽O,ZQ`5< }n m -`6*gyb~;*@`z> ܬ)8.zd!#2EA$1=J% Xi'jFy&-urpUaBp0cZ$LB3Fr%F`m tBq % nt 8J\ [ 0ӝk܊ff:]˹hUEl~s>pZ;_";9jB+j`B¦OEOmcXxOKx|L1VYy3!Ĺ,&ixwc0˚AfbG0QP"?)/ϣ1hD "أ`ir׼$|.c/1L3I፿.  VS,긺o :1<ej@ .wOW|2Y$ЇO%;#^"e~ZLżhWWǜ~_%*)3 e<Ā{!MrePMKOA{*].>4 rƘ&BM?w*b`Ƹ'uCrs.@snA: 5N@E(p +3&Ȁkĝ]b-@YVb,MT/)/֛Ige&$rIj# Pt8#$ ܠH Tn̾3 :u& baP1|>Po(bIKIͻQI.ICp;Ne6\~t> ڡp{|R+O69+Xo]6 kOrg d,[z]zu@+2F4I=#T"| J!p}E8 l3[k6l:g86( l␄ fO.2QX>u9buonF~^T1G QCq{&sMk9Tڻ>Oػ>_/ٻ~C]^4 n)Юc̐SPm$mr@>_/7<hylJFޠ5sr6FyW z h@d_Tvc崰ؐLd l/cR_̬ 8f֍<ܤ1: [YYWEfB#g1rʙmBb[z 7 9՘~c,*] |}-}y[k*ە{Nf1C)y^^`?kHx ]9EouZcƙ(Qv\cĖFLju ܊cV!IYpX=8QE OQ_-uX*Z'7bU]:^O9+֙')92n=ڍ['_;mFj21DH;Zg2B`<~fd\ϫs툄'DĿS-+ f!)d_fW;NR5^An帘? {(FT4zMo)e <0vc8w}S); kIRZS Ԝpai2E9M3F)" &`cL:K1o*sأGA]7O ?*"eM└(Jz"U)HsR)SBa%hPsM]j 1 %NdOEH'JEB9ʒ!Y(SFDS+3 x&R+0ȅH\TE8ʐ W R 6kyl %?7 &,/^Y';1$X'Tn2#'&ak$ &K3,2 /S3Z4Hj1tqSO2ʱ"%+{g2'IɹUB&\V!np8LIH!̅"Bd DJyhR@̋JƱ`_I3yʰ65Is&S z,Km\)$іG;8[Lq,)Kq<,NpaǟL@T)jEDQ`p]iwNYPhk6s  t{[WY\nlߴ8fٗӬLimlZtVc-2S8t#0]>p.B B°}뎮!Tk]h)E'SNWנ(淫 {ޮήIvjJ;@(%o<|~Q$r>o,zs "Ow0`\/&|uZEW)&,sܭMrOwV!(73I,@3ٷUSDue iS5hUU?0R~1EEBvˆ28kT 7o|ny2v^ݫ;iOGd퓅do~,ݨJ nܞjͨ17ob!&^N3W7"Q GY,Ҫ>-JFgnlߢ"5n Ƨzw+Ms(<%`x?UyKk ݭ?Vu௙c GF4+Q"-tHWwE׎NIݲԕ׷;) ^Z"Q$o LdSe۱EhpT3tj55W` oDj:CPfJ ֯iD*~N3O 8X<@r돷7bZ7>`N  wT c|l,][fJJڵ?;5h;ߍImQՕM !J)~x) R? V:JIJ +6 u1%Z;ij6d ~P`WN]9vUYmrh48c<)! 
jhJ255p b")QB$"2n= _]@;E> }!Kbp2d[zb-MZ< @>JВ#QQRPeff޺w WM#o'gw4mN\ņ_Ջyb; '^U/^lnx0yѽ18|:JQ1剖Q(z*=3Y⼘(r s\R,#fHrT$CJst"1 Zܑ$iajyWrmDf ai&9)fR'-qC~h14RZ(bpre.uՕegL%]ЈR%Qx `Z(#4fJ%[/[e&4U bBXn}w%w D~ !012  d\[}$dTH eJ)IDƨDZUT E!E,58R)y2L)7wxi+8DuU:4+5H')'՝dAD4hGy|E9uw3AIۘFI!Q;r.8Hs߾|nohV/Gϣ3 F}BP[P}F=et%r'cELcn 7:yaGm-[lТK[aaw~+-5[p & #)`t~ݗFP1tFPu *,3਱_ =ђ.>R2Ԕ,IVGqF$ IZd^i‚׈tRa jɓTd*Gc *muk2 ;p* leS,ɕ2a"S ut'ib4թXC#%VgS^=CFgTJթఴ)(S'0@#X6w$'h,;إ,!JC K%ZK ..^+V{\'ϗP7iX[ϒj%`C$GRfkQx8Gyh0c$djPX+4 qLJM+!1ו7gPկZM6AxWjHS"!=TQc1B8m2-GZ(1:_bK4?X{P>df:Hct3[k6|٩-nWMX]z64_l~_⋿L6Oa\nQ$ȸxrk7wn[K}ourzt\&)>-GLhӻβbfr}z^kG$?N=UF֤^r1Y,ž\272Y_#ߍ P FAՇzŃW?h8"8Ƽ"kM Ji43\...O -K `xwDC,h.|,y@)7<$2҅ͱV]b1KI<9^~r6>z1(YHƽeG\Nq#\_ ٗˇ08`7 =I$0XEVwk=Kd$$V^$ݱU*^sI^;q )CY { a͎c8vUwtF=+]+}U1-p*MӃXo*RF|agqI 4jP)ӝ: _uZk%D,l' Nަ3NJdZoox4V`4NH#,}gdFĎ5,LZw%ƛ&,Vq^QSAH9j(ȕ,@%˅ώ1\<ґM>{yD%绯/ZHrZSJ_ ߥ5Aˤ3yas F% JьiECHcHu)nM?=F tkː>ŴfHQBXRiF{@'5l].|Oaw_r7rV<}eպj}`%&uK90֝[W FU%M~9YTur>5ǎG'#oe,Ht90ߝIx r':Nr8.]!j׵S }(eg|3W=oeOnS&ڕTafsZwy |戾$w*[ P΋7KVl( |Lh+OO~s,h5ݿk~ }JjG|pt]˴N+fmfLfg~ Z;Bþ)2kAqh-0-Hk)EJN77-q. nctF@i)#oS^.'>|v k Y:גY:A嶰!#ύr`-"!1OuG**ȳC"6>.cyuMuL),hW5A7IhгJIn{BF pJz 6zJ8n:NRjn6\.$>s%lg$>b$2r*Q&- czr5J7qn̷{ Qo+4”䄖pۻ{K2/N?vL~OM8 tsn{( YكkL[~D$r48|n当e`O<eLr A'yI*I#{݇g&ݻYnYxY7jY7g} /5h%tbld'Q凃sT: =(#R@<ު.WnR硉ζm8}JՕj}=>8gwbՀ!B=OG=ON`] {3(MWw!^JľҖ]3ÚeeM8_9ӕ렋9_4ᨚbp{jMa{Do֒TUiQ**Q"L-;PIQٍPƓLX'뭷 (`^:ȐiuWJ}VȦԧmnѶ?۲LE} Izo6vCgnV XOa$5:~g v-. .2hI_ABΣ(3F2 I& ǨSY<;2z2孭& :f"se/tQHS" 7\:E VM%)Rq {4~߯#r,h7]o"^f9]s٬Y|>[wߝ5JFNѸBUf!( ,SjQGչNLTa7 /g3S9_ЃlIs߳h zwQ o􎴹{DkBV=gR/۬JO.s`Ju4@fR޺ |`@i74Sp.-},/BTC.Rk5̂otr X>_& 0ǟYӭyu5_.7|a¸T"B-_,3CVD BɍJH_Vtӥǭ HU7J~r'iM}\2xu#΂,)s&;+6`@ Gq9WBUaa^.YIܘ`q)@>8}!SԎ&s`@]{HIMrĔp1`3iy04AqDn~EUZ*h[[OTsD(C%&v+.yCR! y0H궧ai(C\53] tHէ4"'eDz'#pTFw(ʑ~r?I|GS\g5[aV F![6ElQukxj2=Hݽ'2ACJF:I@YK|tO5 ۺOiN ͢xظEؕzB6aKɆk"G-z3kĨF=I&Vr8:llb=dFpt܄].Djƶj6G|J\'s1ٗ\<gzZҵu-EbbJlmOvV04TIzĢg֙t?{Tk, 3.ۛbUEmRNXPy`Ãn4kOdWP fWXxbc]h@V$a?:CF^m+k i:h(2*F&cS1gx*_;,1j31$r`ނ* a iX)f|͒swi45KVWgWQIm}c%-ĜS3gs"=Xnסƌ(+}WS%@'&dtGE #Ή"AvcbHP-Z&;&"f~-}7/!-Ĕ5"{)ǡwG*('!q :MX4Hd򾿽!$(\OHy6Ո9-D"t'ڪR8ovӡU-4Cu<9 ɈݾGs5>Qj8 tGtPo-6")޺ !*#Iճi">@NK5т)$1s\JaB̟ɽ^rUr 2THb,`dH!4C#r" eiaxF2O-()yjMP9';x;JNwz_␘7&񕢑DRNH7":u)EΉbP\>%"ùͼ?`n2PX C g#iA:pE| 5BKIQP 꿫tNtn %AeɌ/Έ9픣J%i >V30VUF21G *=#)G au.CpKe@Z}p_W=}ZM|wXE/_ѫo?>{|Q+?wg~<s <g?ߟXaᇧ[;y [xv@gz#_1dU,OIb$y$JcO$9[EnmʒvD*֍d-T?\\' 9_jtDL{!{;;Dm!'V{~>DgE![vZ [cGT?tYLMKEŮAX$nLId]#+jl[M}̌SKi2V.cUTQ,+jV%/v mHL '=`]-PpXboPs.kzґl mm-)uldol%54n~sY\u}BuU,doOd(r]|&,}Rm1@|lm' $vQ߬ؽ sg+EwZz;YY>Y!v]b^1gZej;懴v)iQf<:YOo'3n{=^P{<-:S?zi΍e{JOZv^,|U m2m(:o!MOg~]>ՔxevQ5(hrGAcϷ̷45,gMt`8ߐؔȵo|ŚVh,`LuIVi=~:ygF%]mX[v^N_EL//ya ~D.oCOUG9 n̊::}{ȭ>\@تzqrgjxzYh)*.)]~/ӎ-WfuXۇfx(m|}XT < FZ֬+lv: Pƞ Wϟ{dr8Gsb9S: Il8%HG -U:(8l*~yypn.UޓVib^v??nX0焂(DӪÔ,NՉQMvF J#nwozNĬz4ʝZ͇*Zcj%'2}!ߪdEg34ME\팮C| &@UtIUj27 fOqC6X5 A@ ǥoIPj+8mYZi<7ԖG$^0yNAjF?!Q KDC٢뫳_#䢛m~P"hlEߤfνÒR}uAs gibΆgVzWFw^\3هř['9e Σ, x9|ЌO?! 綯d0MC 8z}~pklC©vpL~x1ǚv6C}loWZ]l*흨%dGթC%\cļGV\ f-XȾFD&^kJKS匳YS [1+=t_̛MfMm9_/~ϷYoxƼL!J-1u0Cc a}=Rf$>Xd}o+A[S=׹eFT@_ff0dzL4c}"jЀy:M&Y ~.+#oLɳ7y}je> -$1E {oΛAz6'W#OXQB/!yw|tlqH<&`7) h[hȰ 2ܬ6zZ_Q! 
/0̜ /EV+7 6NM.h?fm5jACӖzD( hR vFϧ.ܛLRmz^'` ԅjl( E,* )cA$[-͏7cX'1d2?/bW-;@xy sV;x֙<lF,3X#5DDM̦66UXBָ:*|8?^OJ>K-}QTzx'U2Z @#Q'Rj n7 k]^}71D  ߮ߎ'J"CO=!Nq<ێeS3:L!O܁>v*z^Ἓe#$*޸c[K8V=5S}3b`uyQ j_%Nc$˅J\U*k[S~ǂB;rɎirwwg<dqh3QnGas?3:ξ nlŔ6-sjBv ,'FwmՋ8,ȋ!6q :V)\'iG(KJ4~69*lv,\jgTepٽ>|Nm=`q۽<!C"&2ݸkg <з/Gh!ota&& aLwm$o@㧮xs>αO-;O7 n#Ni sLΫzoR-Q|';R۩9d(YM]^C)i}8Y5onT2pD ָv 擦(C!xC{p~I9,nXAX nJte!.oÓVF|+[,ԯk6wvc YsΜ:_]1]6<0 z;*QуLkdKRf_=k VZ}~ cشx!'Piڜ8xSZMy`_D8y/H{̣Ȏ/>j>7woonu_ga}jbrvoSJUVm0*kTzD\+uXmr Y^U:*z@yp$9EuTe k >ZvAUjkZ7TUDX40ĊkQ30 "N+6zg4:]A6؏NA-.V}ͱvxs~.$%iz}yI{-X?\\.ڒJ;p/59H5t~ǎABNJM)=h.Da'NN.!Bz"v+)AP1W`+QщD6lX{U} =(si52e;)m&D/;fN&+!+`0NN7Z[+Qp M&>1z|/\\\޹I0zb9[NӖ ˣ_E5G>`Ǻ98Ǻݣ>杅믾]\)#o4lj5ˤK#l[R PRAm`xeS &j;I4HV(ѼjPA"mf / ,56싺FRڸ:J^iTU6Ȍ54MkqH@(6`J }hnSGL}qg;=zHhǃYd neE+$o}SW5PYGoJȘ2(("++FH\X_C,.xe** Q \@E1Dz"6M A+Vu3 0p DYr@[CјF6zRDm=k%n(4 00~ ~U{*N~_79PCÁ"c_~؀>Io)$@Hg?wX<3ޓNhg5~TL]& ze x9eMv$.y#NBv Zpg)yK(QY|MQ.x#94af.> N{eڷ'RVcw^GP6ʊH 12c~r # 'aN$!HtK]j洨a7= %tEo3!U&38Q֣J9;TI/9sbSs.As m;(_ڥuRs91:}ޮAQl+i_QZn;  vcq ̠2ªNuf,#K̴7FL\f3AgDi֤mo1EkL2xER2̝%i_jOY=fRr,?.>tNbO4?]UgW_DjȻ9g]wemH02;[zm;އu Cjig&xb,Jv8$,/G-@9az|Il#hC诃 `/xO}gvZvޚ3lXxQw7v^/F|/sP~x4'뱇'B+a3`ObE_qg:V(P>z 9ڝ/Mmeq2'i17.+yhx Xk&KEۊ 9y?ݹ*OCyJ9֗#A&t:VYna$ xCC,=Zգġ UmNj F H0 /NJ[fPAE~w A}: ^gqh~>C{EUBK5x.ZDUCB\xs/NH xo&׀Jc] K y'TK Ձ`Q~KxJlqd&Poq|[^¦tx~w66q+udٻuW~^[!χ;P.9o:a xSw«O_}Ppf͘b5^ .LlIA#wR Ў~~=_.46|9fN{^Ln7Ajs|ƓFcm}xU ⍺}po:H+nRbR*A1Ha1{(aPl=HKYe鿅Uc2! y"ZK$~bWIo?qBVѩGv="ԫֶvݚg.dw6&}[翭Z˹!ok·5ə9oj@* Z)jtmR<t\$sr8e ptXi,xLl#i1(Q2%)'Ln`lA4vDW'$|6#'eZ?y!"c12;1@gLLbģz̅b9N[w7CzV`$薪>e!B(5Vۂ]a'z N~K9p!gDࢾT}B#T5pI3@p%B\>O> Ӯ\%OyI-vE5EMv76/Lr+܅I"׋k~Wv:_܁e>3Jl> xv^f2qo\&dhLnV/,/^ %{ o~y??xë7{1'a&NӛS`n×L5jiʹ*L oP%X8K12<Ě,)I1Ca $ٔ$f̴# f՜pDI&I"34an2Ĉ JRRK.SkP3|ϸm3׆:` kn/{}=WVM*|ϫn^Jj/p O}Z!r^vL,hv{ iLebk|7jT'6g1}*""}VV* J%=DPYO|9}r,8L`$AG.$!JZ13V&C|NŭuקM}@VP@ʧ;~2RtVa wβeǍ!&p88̫' ]T&F}_35\:`[5\t %”0Z@G'&+rTA/Ghyº-oĄbxN7Xؐ)mAjQl\=T 8^¹1Lxͭɬ Z =7S8 ^l2ⵓŃ΃j{s< EW +lբ/OuU]`gs$FIbd Kbc2lU"R[ PVE*&l×%7) Q: b\&.BOy>//]FaX}~{ /x >i>DDpK͹~ Nwh8pQЋQ~X~g|<h& Q9f]r.JpӨ&0=5&R3mL_' ,rYz# rGz&PǖEM8i O>b)pYSTVw34ҀRiV+[hVƆ9=P˂x4hH ~'law-B[W# I_z8{%Ju./ʏK"ds me4e%~ )i ʒH0G,`#"t,cCitwެL7`;FqqD3U;K%;ъ-3H(針?'_="E?ai}PASt 87ibݬ/yɤFm'HVD?"ؒ[Jq[2mۆZwUiTCiTBWaH3)ؿJ:#\c ٗZ#OLQ # f9s~8OAhhYY 1~** usMq Y`A=K,FTb/B;Y@ #'פ0w>D'?"0͐͐ӶH,'.U0v4®ؔd5X! {0+] aX:~B)L䳑2Ѩ&ͅTJ& g"ɢO&~rHgxgْpW װ.Ґ>|;@7=iHq #f(+"]!25h J&J҄f2V.` &17i"!paa+ ۻbhwZ'ջD`"%$b`E1"R)2TՒ䢌2lS8"W^&).45 6!0URzGjQ3A<#foEAah:(yfB(]%FoN,GY&ILLQSʭ"Qr,#RҌJ >aӹ$ʡIŖ  Ն i'B!A 0_3 bT"2Ġ3'!{.c+ &mֳpZ%-3Pq1nb16iDPWaK$n1" #2+P 23tc;H,$(\4~-u(;wQCP/';[Abe:²D*2UJUw UC6f>|v?7ngi`y./kWk1<-,## a]shpAYq+gTojw֞R.w Okcdnc6XW!h86[1m; )i <-3?Lq0ۍdy' mdx5whAU-/a6rF|kT k V G~C]VA Ί^>HvxHphVW(sn=>Ĥ@fDI s wΦe'~isQg[Ϟ[D 3^n/q9 L1oƼ7> ul*me'ƲnlZfͣV`U88m [Cv ̻VQpF'XV+;^_L{1:nk07G)h3g6Kͫp>1u()wzO'6/*FwO쪴(Iݔ/ =^f _̸|BV FnK%\8-w}Ryxkg+KgĆ$䙋h-򖎾mFWڭ.!S*,ARwj4T5!!\DdDn5իiv<'Hq7u<;~ZR~:z0}Pw4wd3U( +:- -]jkբ_B.(Ҁt5Ʋ=j9dy'Q? !%Ҝt#HUs8Gf6Bd =Y(δpj8Hzwq啶jJT@b[BhMغqKoKoCJ2CQ.G_"9Z>hXGhI)8TGI!&mCK/iUrIu*q,ݥ=h3k Pj4ؕt>λ~M;ĸgsL;zXw4YY&B7!b(P C}\W'cD#ѸUp0;ݺR¶ULXk۽Zmm)h¡თRQ=%HL" eņXXp%!Y*Td~Cn Ωkp|cjj NPI* a!r H@e'Le,bPZjSG58<($K8e\a ~gjH,i& FqnHRK PP;&[5GWkpޡjx l}cQr{5n5*SKn7w8-!3V.F ̫{ghH͚pVbh sDٻuBw?| B(nW;TWj+!#+tZE/oGe|`V;ʎk3V7'%y nQ ƯlThLFrJqvʏ0'Z#]YT%l@|/+3P[rFk:/ E3 ]""1oxjZ@cذwn+4+ʣf]\e0Kl0Eq$B !=ؽvr5Ү,i)*q~CJ3CeنMKjK`ՊZK̴hq–Kh>qP:N'OS{6Ei)C)ݧxS6 c`/]@0d@k DԞhP%i~. 
lei_*2 V&GUL )ƂRh'gMd&zEeI}r^fO| !\ _/QKVobU-~\ o(ށ7@HsZohy-mlFּgmoRK~@X.6@86Cd}me-%^ y&JYn9\Ff4arr 12ƤZiF ܘ4][tUF4gY%  yZdu7|f@u\0%QO1G 5&Wټf02̝mbpI&#x?Tgo  I*0kNieʩO3uƙ)5L'$an/@l x4>98Iq:! "x̜a tk p!Ѐ¹7Pɉ>Wh4NOpJ822KKag9akfu,Y(cե,KgpXdZd_v:KM%e1V=6 wsE7bԘK:> "[1%҆ߪ KDcĶޯol1yq_|^r(a oB!@M]јݱmn-޹7W '+-SP*&RV\8TZ\8-X}sjH5tε$Ȯ59aym7~!)@M^:( 2`9أI:@S jk&r!T0.ZHҁA##9H6OdZ#kFڶōb"7Fp.3zP|9jDqT1[.U:g=®avoGUHM X17)jh꛰.ʆ{'4ZZpXA2I%ͼeN'oappŹWQCРhQƠ0E۶wWԵ#Wko15*L4T4[_Ns㲮 w튵nu$.X T>=a<伾{w?]pS -|NǺH=B5YBn |3!]3f񗕣+Jn//,&ٲ NBȇ|>#?c(G6 16[(6 X*cZn3pƘVdvSMwлbPe=4B_8u*wOE a!_v9}f8mj5Z#=λ]Sb]fݫiw6v_(wD;ٔ⇶  o[ÛË@@Ԩ|›O&pCnjx{Ȩd2w`[:k{}I삶u؎mx?OAAq((Eu!ggᱼ56G:*ɨڻ3 IJێl0{Mn%,P3PI"/.RMVrwL sZ:sghcGB9g11?Av0df7>̋>{G" 9 Pk"lds2EhaCR_64qCw @ܳone"t|֯ڡfȵ\hX_4Nu0c[ݳ3pU;L/W:37{[  854ɥN)RsT]aZ8FrVw|d4E0Hu-&18ē!$ @1RM׵#@PsX;nf:w2yӦAĀX;"a ̨ E=n-tĥfD= J&ͫ%?Y[*|Rl@l7E}E@7cԐk9&Vݼ?3>%՜!Zܐ`yL,[i]\ڐ`)F_ ZhT];8,?/ vȡQBK[5$إznHP-Q0mbPwFC#h;-aZ7]/_*2 V&GUQ J)/#NᬉZ;EAQ@؎A`wbH}X+_ 8MZ K"ȃA'<&HsӠc . 0ВmIuP(5)x~^\ zeI4$}3l)riU[amתFZ1#o>c xxCP2eZ\4 ?Ƿ[Tu?k%%,ݢVHkIwz"1jȍ v~|o5Rib7!جo:~QjY;Aӵ %k/۠Z]ܴ^ G,oIW;(Ʌ Kkj0|w%qۧ-%q˫w>mՊ]S SNuSЖ!C.u7LcqXb "(Az/*\rvU P֐(hbWa7`0LPB(N=s^\Мۼ^E@AhQANTPo{,ڋٗAKi%!P@P8+u"1痋8ǩ$ǟ?C?W_.?|PɥIx\=yZL\\>]zl*9|׋HӐl[ j-\t3n?CBXMiEU|syrjC8cczK&)/%iF8,%^͢ONN0h`JgtJa:!^1jI;ep.荦 )q}Bq*x̴u$&/()#!)t ƿ9 20ssWmYc6)5ƕ)dJ|ʌs ʤV} ;2K;C{tYN3kif :=r{-=?ǪeJYI۽| z<.n޵6r#b "Y OΞ`s%vc{-dCd%K6[nbe]Zꯊ"Y/%5kn2o<_V]ϯή]q[ 'qV7!+yLHT)"S]ө֦8X:ZIisEVRx6 ZIaVR\6a%zBS#s:^EK|yk[dרZeJș)GPiKX;W"Bl nԔTH@7v!bU5 LyN,EJ pڇ93*PK1yrtZb-AM^pԔ.3v(Bm-PKGjLSRhѡ- Amr.㊕Ԍ { dD"qC8жo.$aNhN{Su:j2{`7g3ۻ7.Ӑs1N=E:m,$O!YQf!cbJ^IvRZlJ3bZEָ`a=a_yKۓqߦt V;sܠM!u3~Hsa=!Lt"hܨr~f/rt!\J5S f35-^RM1Y25-艩F9e*rOqtR!eIp/1/VmᚨñXXQ:bj]!rEA٤F[G=e%(_ S'+Ax7c {(ʞ`O/ lX1Nua=l1q8F>F[MzU#oK҅k[;?xweNBGFF, }ӛ:^I}e92RW ̪dx\Emx* V@4Xڃ$}si-l4cUMO+ȱHd)zǣFn5;*fGǪS&"-j4R*@O^ aDOM IF:9BɐAˡ~N<Z?0s18U=C P#UPd-o3NZ>O>z)AgF|Ѝ7<xΩ.zzzzVhrd pRZ OAk@B *^$bO6TtB|a>8[fSST,*Y=ZJICSD5XShkbAgg:ǐ+p=8AY-A" sX]OqGR\$d@&'1 Nh'b8nGZ$ݤ)CMȠI*ǝD0|=祃;8l(P =} 5h1V1ii*_]$))>ٿoJף"j 3zI%G!/b_4>\r_L`@ aVHW嶪%" ,T;x\LxւBNK[1&F޺2sh顚سXdyV 'GLvLϾ؞5둫My?R"PϚx+ntV܊8,\}ώkcV{6PIK;{)=:;ptti OgeJιJa>D-~tyF1xS{"ȱ:DZb5§LuAK#6^,th@cKvk ~-BnWI",ѓ}R"$5H Q'>^ Dxn%uXfw%?]o S(8 ch'4(e "C)#AZ𮶞D:NRsT=h'\R≣`Q{6Vudn㛳cYlŌU UZ+@i^ܳJtD Eҍn.7‡5XL@\m@] 8 ȥ`蔌СktK}(?6O '%j* ئnE;4Fi)SJ100HBt^ <~mG_S#>b$Q`͵_JB&55Q$ԕf*0 lagFںNTqy;aZcQ-P:uhIc0!04d*F%LJ| y_n0ϣDAxNFzmuFAn-W,="]V@P裩[:ʔGFhq8) Zi+-4׶Yy7F4G4yך :6X3#\J͹( u?%NNn;=d=l?<}٩fggZR1NS*ETi]NT۠q-͐Bd4x/k {]Is A^A PGXX&ȴ6ς赥* Uq+JUw *m"|x K,Wv֭%׬MkQ@5Ea$|{}w,mjc˯ˌ29Z'gbzQA]Z(^9Zq_=' 7&[֢oVvrdw3!]TrH#b}u ?!' mLrP MFID|d̋! YblZ}zrOvTJN#Ƴ |WbBI1Nbs؃w^ =N0ڢwT-zu(H;IymNIe"[Us@Ź.H=&\*%7 u];#`Uc7,2ySHzmxZ@/u[.e,:v`T >њBq~ܔ:Ч}4CmR| NhxۗYJӅP|KTQvI8owӛKb ~~a3JễJ|Ai }H7ww~||2(鴵eW]UKl]zd"5 zŠ-[\8,䍛hMmdpR+#Ո*hxrrKcϥmtq44r˓2]nMtL辈\i4//Zo@j(&5Q铵5>O[ UOZSwSf6Ԝ:ϔI+H JT^X>-xRG Q `,\1(PNX)e-p22Jmڰ?˿~?5֋NՓp޿Kd ͵Mޕ R+?ܚbI%SCHRx;Sdo9eʱb_ZAWfuUݜTU1>Z%bZ)(VtVh=wS1HLGBIbOʣ b>m쑏M%-ොǘ$}-) DӄX}KC68P^%ުFg=o1'>Yal%&`UmXF):MZ p"ބϼEt[ϫ/{1ጌ5SeGBNh E|V"=kNԫ=.ANk*} PaNמrV\sfjoH)w:CmT0彶,잔N GW&Ǩ!ЬUvd:NZCz}Cp)>'Q"G+^/GSՎ6m?.wu TnzOǝvʟl1:޴ HXk'_[-=_B* h-KUf][(v!F_b;hP1;v @v:-Ah}u:ݱrepI)/;0>}< ?IpW#Q@9;8 eM4hR3 b7B1ApEdLqʹu ܳXqiH>r].)=֊[>M^UUnUa <c $A6ZU4`=7?÷qbrAF )2%EHӔ~㏈t 9(:& g<HJ%dݰR&2 x^3De@QΘ16nChmMA4h2GUq VQh71%ur2s 5S9P*7L0iCrj.>nJab)%:互M_4 hLm$ /hTyO\ԜWWT㣇un7x7ztV_ ?<΁dRJE˄"1. 
FbDx( ٻFndWd2/`$}8{`:DLMOXn,7'i*:9?GeӶ )ePD*#JeYŷP~}6 "@Rm|?};X64Ӽ§(OP~gfmHw64C$`l :ۓo0c(M!l>x]ǚJIP bY#%,kM/Rjߗm;$=fsE0p2)< ǺJ!\ǩηpLl>Ͽoő6@9uT64\aգ'΂RՈmsn,dk Ƈ-OAZ؞y& 31" f&YPW3bM[jg{Xq&iVwҊܨ(s K.Ն%1vO1z:K3mIZSW 5h Q}.1>7}  g:#8烫i)%;ڭ$!TGu;k]zp֋WjF՘ۏ6Ï1w=kFU)Ҳuv 7ky}M^5",{A=!5>?u||.s<9!9o63QWLh;bqd1vscu\sRDD aQ#H)KE4W:NCN)F{|``+ISYRiδZjfz@jC3IANxчt)0E+GH)!(pTHA `ῧi[r9-y7ӳM#$Fv0hJ!4xLdP_Ж)QSx"`@ڵ*_IFZ[Z`t4(An~56=+8ȧ&jp찶9/=_,Aw%T\q_&j%oÄ5k7]s*@}B*OiG{ W޽ ϯ36"9OUT.`8@faf L/=hK6t"..D&(' /ޭMZY[^o]1T]!V\=3%t+ q?4>CBRwPEܻ99s8L^*ins|ΖֱkXF&NY02;1d  lR\PbIfZ[v,_q5#M:O==ԤղpFi6x"EcB2W د8;DU*9t]3Ƴk/Z,O]7-1¶IYZ8r 4=5wNj!~nZX'}ޟn g^^6K ͳDZI1OD&t_4bT!g+$ǂ ^%z=ۃzڱ%Gy ޗmC:U_)7b]PǴ1i린@+L1[OeRAG_J ٵ;/^@`痁7xvh3IGwk,]Et^D:Zw'I2&W*H_0/!ISS0飴BlvHL^>K>Rww孲'¸0fj}V] e--dx LJ{m0q c$F1 K5q\XjcPZ58]k74GLr2PW m^i+se&[݇>m/EgAڍ ?]z&4ަ:VwuGĝLHL>*h-g-\e?߶Vm|>hТW0] Dat89ᄐ.BXB~ʽy~ۘ4X5!p#<ANοg :w1J ˖wWSfµ-Vqu԰j=lu ]ZpCpU8dy-FW$M4wV!QU\{R8#S޺++kĩhdJG|p5m#V ?u_=xƃ[G=!\#/ŧ^=!.q<ϋ痈YpԝZ{K /0K3I!>8ӌQ\t$Jk#|J`n$ 2rjڸFh Zm ۠E>-U.\&1J]:asJ;i% I!P8=y JLUxqQM8_j=E!' 36Yd=Dp˭6\zA{"mL]*7`ju*ݓ`Zc Y^y,c\f,SxScHc_`AU8 YYŘ5X[Bȁh+4U=!ݛ/a83.\/bVzXnr`<$ kgw҅D_!![˗\J ]B])%{u:f>P($/,l].܆@9Ă Q(| ViccQa%"K/[D4Vwa9K9z߾B܄;¤ӱZjZJcza "bzwXPolYީ&]_{@C Hֱ=+1&=_3A*IJԞ(eEY@(F)@<vhFJaL!FDjZrN\ƚ{,t`$BE J!2Xw-b)a!60F LH 0 X ż6HJMAE󇯦pa6`쏏wŗ~uς0{ܳ5盏?WzSpt{oM6۪^Z9!jOti"~ToD)sR^S8AD}e ݶ=, ZNH>ubJ9]U| ~͔ " *i7~MӷS`vO%Tw ! b7B6X(r:oD}J WTK&Ps+u>PS~Xb$1 |}ʮsn]QI(Q,ZyIKJh&2;{xy 繹d1媳~Qr?LRhm>-ib54~D^!HԮ_kalr%pdjXnzirכV gxשߗ݋eq3uLoRs2bQ3Ei dUu5o!,ffCl]h%Ad^@f D>)]dYdͩ W2VjkH\ of19gzafr8F}>KYzs2oszs2p6K)FW.4:mçhJ!*u5Um|hmiOx\ڏAO(tw *u/6 *[odkW޽a"0 ATGlzlhm$T#ӉFQF_n}q;kjG5/yoֽk9\$wTW- (]tE*QbD(%]-y<2b Ʊ,4SJ۪Xl**}E2 > rh5{0Fhn D )|Vր=|>,„BWH-1#Pe2.F"Q+ ]7hBY3DjWiG7QP`ò4Qv? pfxAq0[9ETQ(x]`x)c-ï2qVWB-VP+)Z.3Yǫasspz-XVuc2 #AJG&R&{a-Yf-A۷ "36`jó8<38SA9!s#S|kl`nG1UbCX^k*G #JBt5I(Vm<7b5[.,O }H3{C\?8=bý_ n2"Mf@Vcerv]HhF$弝`d缭Kz1ykQ8*$j8uQ@[aƤ9jV&Xq ~$嫐a8 # <(8$ZyTj=Zr l-zDγ^]>}Hy=gǪW:[{h*,MϧwrP@~O''-x4} tO5ϋ8˴Xӷ~ zswDQʍwU\TZ" #(Rw. x)ל~!~7Z:J^*ͧ}Q F+ѓx;]koʑ+f~ܹAAns|~xlG羰})Yj<3C:YRX8RI, ST0Y8O2+9`/3GZox $cjBQ%i)XCO!](RM^ xCP%3_UbѰ}t5|p.@eGጋtQZrq+{uQPu~p2dt -|$9:L<]yxSOx]nw;2? 
var/home/core/zuul-output/logs/kubelet.log0000644000000000000000005467176315134636524017721 0ustar rootroot
Jan 23 06:54:08 crc systemd[1]: Starting Kubernetes Kubelet...
Jan 23 06:54:08 crc restorecon[4815]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to
system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc 
restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c968,c969 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 
23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 
crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 
crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: [SELinux relabel pass over /var/lib/kubelet/pods: per-file records, each of the form "<path> not reset as customized by admin to system_u:object_r:container_file_t:s0:<MCS categories>", covering the following pods:
  bf126b07-da06-4140-9a57-dfd54fc6b486 (cluster-image-registry-operator): etc-hosts and container state files at s0:c10,c16
  f88749ec-7931-4ee7-b3fc-1ec5e11f92e9 (catalog-operator): etc-hosts and container state files at s0:c12,c18
  8cea82b4-6893-4ddc-af9f-1bb5ae425c5b (openshift-controller-manager-operator): config configmap volume, etc-hosts and container state files at s0:c9,c14
  5fe579f8-e8a6-4643-bce5-a661393c4dde (machine-config-server): etc-hosts and container state files at s0:c4,c17
  49ef4625-1d3a-4a9f-b595-c2433d32326d (migrator, graceful-termination): etc-hosts and container state files at s0:c9,c22
  25e176fe-21b4-4974-b1ed-c8b94f112a7f (service-ca-controller): signing-cabundle configmap volume, etc-hosts and container state files at s0:c19,c22
  2139d3e2895fc6797b9c76a1b4c9886d (containers: setup, etcd-ensure-env-vars, etcd-resources-copy, etcdctl, etcd, etcd-metrics, etcd-readyz, etcd-rev): etc-hosts at s0:c666,c920; container state files at s0:c294,c884, s0:c336,c1016 and s0:c666,c920
  efdd0498-1daa-4136-9a4a-3b948c2293fc (multus-admission-controller, kube-rbac-proxy): etc-hosts and container state files at s0:c268,c620 and s0:c435,c756
  20b0d48f-5fd6-431c-a545-e3c800c7b866 (serve-healthcheck-canary): etc-hosts and container state files at s0:c19,c24
  22c825df-677d-4ca6-82db-3454ed06e783 (kube-rbac-proxy, machine-approver-controller): auth-proxy-config and config configmap volumes and etc-hosts at s0:c129,c158; container state files at s0:c381,c387, s0:c142,c438 and s0:c129,c158
  f4b27818a5e8e43d0dc095d08835c792 (containers: setup, kube-apiserver, kube-apiserver-cert-syncer, kube-apiserver-cert-regeneration-controller, kube-apiserver-insecure-readyz, kube-apiserver-check-endpoints): etc-hosts and container state files at s0:c97,c980
  57a731c4-ef35-47a8-b875-bfb08a7f8011: utilities (copy-content) and catalog-content empty-dir volumes at s0:c7,c13, with per-operator catalog entries (catalog.json; index.json for bpfman-operator; bundle-v1.15.0.json, channel.json and package.json for openshift-cert-manager-operator) for: 3scale-operator, advanced-cluster-management, amq-broker-rhel8, amq-online, amq-streams, amq-streams-console, amq7-interconnect-operator, ansible-automation-platform-operator, ansible-cloud-addons-operator, apicast-operator, apicurio-registry-3, authorino-operator, aws-load-balancer-operator, bamoe-businessautomation-operator, bamoe-kogito-operator, bpfman-operator, businessautomation-operator, cephcsi-operator, cincinnati-operator, cluster-kube-descheduler-operator, cluster-logging, cluster-observability-operator, compliance-operator, container-security-operator, costmanagement-metrics-operator, cryostat-operator, datagrid, devspaces, devworkspace-operator, dpu-network-operator, eap, elasticsearch-operator, external-dns-operator, fence-agents-remediation, file-integrity-operator, fuse-apicurito, fuse-console, fuse-online, gatekeeper-operator-product, jaeger-product, jws-operator, kernel-module-management, kernel-module-management-hub, kiali-ossm, kubevirt-hyperconverged, logic-operator-rhel8, loki-operator, lvms-operator, machine-deletion-remediation, mcg-operator, mta-operator, mtc-operator, mtr-operator, mtv-operator, multicluster-engine, netobserv-operator, node-healthcheck-operator, node-maintenance-operator, node-observability-operator, ocs-client-operator, ocs-operator, odf-csi-addons-operator, odf-multicluster-orchestrator, odf-operator, odf-prometheus-operator, odr-cluster-operator, odr-hub-operator, openshift-cert-manager-operator, openshift-custom-metrics-autoscaler-operator, openshift-gitops-operator, openshift-pipelines-operator-rh, openshift-secondary-scheduler-operator, opentelemetry-product, quay-bridge-operator, quay-operator, recipe, red-hat-camel-k, red-hat-hawtio-operator, redhat-oadp-operator, rh-service-binding-operator, rhacs-operator, rhbk-operator, rhdh, rhods-operator, rhods-prometheus-operator, rhpam-kogito-operator, rhsso-operator, rook-ceph-operator, run-once-duration-override-operator, sandboxed-containers-operator, security-profiles-operator, self-node-remediation, serverless-operator, service-registry-operator, servicemeshoperator, servicemeshoperator3, skupper-operator]
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json
not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c377,c642 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]:
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Jan 23 
06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]:
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:08 crc restorecon[4815]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:08 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13
Jan 23 06:54:09 crc restorecon[4815]:
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Jan 23 06:54:09 crc restorecon[4815]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI.
Jan 23 06:54:09 crc kubenswrapper[5102]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information.
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.418517    5102 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime"
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421741    5102 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421761    5102 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421767    5102 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421773    5102 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421779    5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421785    5102 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421793    5102 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421799    5102 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421805    5102 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421811    5102 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421818    5102 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421823    5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421829    5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421834    5102 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421839    5102 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421844    5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421849    5102 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421865    5102 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421871    5102 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421877    5102 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421881    5102 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421887    5102 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421892    5102 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421897    5102 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421901    5102 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421906    5102 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421910    5102 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421916    5102 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421922    5102 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421928    5102 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421933    5102 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421938    5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421945    5102 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421952    5102 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421957    5102 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421962    5102 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421968    5102 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421974    5102 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421979    5102 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421984    5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421989    5102 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.421994    5102 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422000    5102 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422005    5102 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422010    5102 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422014    5102 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422019    5102 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422024    5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422029    5102 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422034    5102 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422039    5102 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422046    5102 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422051    5102 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422063    5102 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422068    5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422073    5102 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422077    5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422082    5102 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422087    5102 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422092    5102 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422097    5102 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422102    5102 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422106    5102 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422111    5102 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422116    5102 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422120    5102 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422125    5102 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422130    5102 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422135    5102 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422139    5102 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.422144    5102 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422407    5102 flags.go:64] FLAG: --address="0.0.0.0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422421    5102 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422437    5102 flags.go:64] FLAG: --anonymous-auth="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422445    5102 flags.go:64] FLAG: --application-metrics-count-limit="100"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422452    5102 flags.go:64] FLAG: --authentication-token-webhook="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422458    5102 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422466    5102 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422473    5102 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422479    5102 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422484    5102 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422490    5102 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422496    5102 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422502    5102 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422508    5102 flags.go:64] FLAG: --cgroup-root=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422514    5102 flags.go:64] FLAG: --cgroups-per-qos="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422520    5102 flags.go:64] FLAG: --client-ca-file=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422526    5102 flags.go:64] FLAG: --cloud-config=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422531    5102 flags.go:64] FLAG: --cloud-provider=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422566    5102 flags.go:64] FLAG: --cluster-dns="[]"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422577    5102 flags.go:64] FLAG: --cluster-domain=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422582    5102 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422588    5102 flags.go:64] FLAG: --config-dir=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422593    5102 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422600    5102 flags.go:64] FLAG: --container-log-max-files="5"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422608    5102 flags.go:64] FLAG: --container-log-max-size="10Mi"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422614    5102 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422620    5102 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422626    5102 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422631    5102 flags.go:64] FLAG: --contention-profiling="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422637    5102 flags.go:64] FLAG: --cpu-cfs-quota="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422643    5102 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422651    5102 flags.go:64] FLAG: --cpu-manager-policy="none"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422656    5102 flags.go:64] FLAG: --cpu-manager-policy-options=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422663    5102 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422669    5102 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422675    5102 flags.go:64] FLAG: --enable-debugging-handlers="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422680    5102 flags.go:64] FLAG: --enable-load-reader="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422686    5102 flags.go:64] FLAG: --enable-server="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422691    5102 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422716    5102 flags.go:64] FLAG: --event-burst="100"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422722    5102 flags.go:64] FLAG: --event-qps="50"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422727    5102 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422733    5102 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422738    5102 flags.go:64] FLAG: --eviction-hard=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422746    5102 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422752    5102 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422757    5102 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422763    5102 flags.go:64] FLAG: --eviction-soft=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422768    5102 flags.go:64] FLAG: --eviction-soft-grace-period=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422774    5102 flags.go:64] FLAG: --exit-on-lock-contention="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422780    5102 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422786    5102 flags.go:64] FLAG: --experimental-mounter-path=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422791    5102 flags.go:64] FLAG: --fail-cgroupv1="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422796    5102 flags.go:64] FLAG: --fail-swap-on="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422810    5102 flags.go:64] FLAG: --feature-gates=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422817    5102 flags.go:64] FLAG: --file-check-frequency="20s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422823    5102 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422828    5102 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422834    5102 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422839    5102 flags.go:64] FLAG: --healthz-port="10248"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422845    5102 flags.go:64] FLAG: --help="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422851    5102 flags.go:64] FLAG: --hostname-override=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422856    5102 flags.go:64] FLAG: --housekeeping-interval="10s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422864    5102 flags.go:64] FLAG: --http-check-frequency="20s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422869    5102 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422875    5102 flags.go:64] FLAG: --image-credential-provider-config=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422881    5102 flags.go:64] FLAG: --image-gc-high-threshold="85"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422886    5102 flags.go:64] FLAG: --image-gc-low-threshold="80"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422891    5102 flags.go:64] FLAG: --image-service-endpoint=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422897    5102 flags.go:64] FLAG: --kernel-memcg-notification="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422902    5102 flags.go:64] FLAG: --kube-api-burst="100"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422908    5102 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422914    5102 flags.go:64] FLAG: --kube-api-qps="50"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422919    5102 flags.go:64] FLAG: --kube-reserved=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422925    5102 flags.go:64] FLAG: --kube-reserved-cgroup=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422930    5102 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422936    5102 flags.go:64] FLAG: --kubelet-cgroups=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422941    5102 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422947    5102 flags.go:64] FLAG: --lock-file=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422952    5102 flags.go:64] FLAG: --log-cadvisor-usage="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422957    5102 flags.go:64] FLAG: --log-flush-frequency="5s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422963    5102 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422972    5102 flags.go:64] FLAG: --log-json-split-stream="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422978    5102 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422984    5102 flags.go:64] FLAG: --log-text-split-stream="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422989    5102 flags.go:64] FLAG: --logging-format="text"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.422995    5102 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423001    5102 flags.go:64] FLAG: --make-iptables-util-chains="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423007    5102 flags.go:64] FLAG: --manifest-url=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423012    5102 flags.go:64] FLAG: --manifest-url-header=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423027    5102 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423033    5102 flags.go:64] FLAG: --max-open-files="1000000"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423040    5102 flags.go:64] FLAG: --max-pods="110"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423046    5102 flags.go:64] FLAG: --maximum-dead-containers="-1"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423052    5102 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423058    5102 flags.go:64] FLAG: --memory-manager-policy="None"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423228    5102 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423236    5102 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423243    5102 flags.go:64] FLAG: --node-ip="192.168.126.11"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423249    5102 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423262    5102 flags.go:64] FLAG: --node-status-max-images="50"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423268    5102 flags.go:64] FLAG: --node-status-update-frequency="10s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423274    5102 flags.go:64] FLAG: --oom-score-adj="-999"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423280    5102 flags.go:64] FLAG: --pod-cidr=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423285    5102 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423293    5102 flags.go:64] FLAG: --pod-manifest-path=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423299    5102 flags.go:64] FLAG: --pod-max-pids="-1"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423304    5102 flags.go:64] FLAG: --pods-per-core="0"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423310    5102 flags.go:64] FLAG: --port="10250"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423316    5102 flags.go:64] FLAG: --protect-kernel-defaults="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423321    5102 flags.go:64] FLAG: --provider-id=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423326    5102 flags.go:64] FLAG: --qos-reserved=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423332    5102 flags.go:64] FLAG: --read-only-port="10255"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423338    5102 flags.go:64] FLAG: --register-node="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423344    5102 flags.go:64] FLAG: --register-schedulable="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423349    5102 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423362    5102 flags.go:64] FLAG: --registry-burst="10"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423367    5102 flags.go:64] FLAG: --registry-qps="5"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423373    5102 flags.go:64] FLAG: --reserved-cpus=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423378    5102 flags.go:64] FLAG: --reserved-memory=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423385    5102 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423391    5102 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423396    5102 flags.go:64] FLAG: --rotate-certificates="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423402    5102 flags.go:64] FLAG: --rotate-server-certificates="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423407    5102 flags.go:64] FLAG: --runonce="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423413    5102 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423420    5102 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423426    5102 flags.go:64] FLAG: --seccomp-default="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423432    5102 flags.go:64] FLAG: --serialize-image-pulls="true"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423437    5102 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423444    5102 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423450    5102 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423456    5102 flags.go:64] FLAG: --storage-driver-password="root"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423461    5102 flags.go:64] FLAG: --storage-driver-secure="false"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123
06:54:09.423467 5102 flags.go:64] FLAG: --storage-driver-table="stats" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423472 5102 flags.go:64] FLAG: --storage-driver-user="root" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423478 5102 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423483 5102 flags.go:64] FLAG: --sync-frequency="1m0s" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423489 5102 flags.go:64] FLAG: --system-cgroups="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423495 5102 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423504 5102 flags.go:64] FLAG: --system-reserved-cgroup="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423509 5102 flags.go:64] FLAG: --tls-cert-file="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423514 5102 flags.go:64] FLAG: --tls-cipher-suites="[]" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423525 5102 flags.go:64] FLAG: --tls-min-version="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423531 5102 flags.go:64] FLAG: --tls-private-key-file="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423568 5102 flags.go:64] FLAG: --topology-manager-policy="none" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423575 5102 flags.go:64] FLAG: --topology-manager-policy-options="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423581 5102 flags.go:64] FLAG: --topology-manager-scope="container" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423586 5102 flags.go:64] FLAG: --v="2" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423594 5102 flags.go:64] FLAG: --version="false" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423602 5102 flags.go:64] FLAG: --vmodule="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423608 5102 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.423614 5102 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423763 5102 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423771 5102 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423778 5102 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423784 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423789 5102 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423794 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423799 5102 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423811 5102 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423817 5102 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423824 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423829 5102 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423834 5102 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423839 5102 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423843 5102 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423848 5102 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423853 5102 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423858 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423863 5102 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423867 5102 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423872 5102 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423877 5102 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423882 5102 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423887 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423892 5102 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423898 5102 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423903 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423908 5102 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423913 5102 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423918 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423925 5102 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423930 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423936 5102 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423942 5102 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423946 5102 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423951 5102 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423956 5102 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423961 5102 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423966 5102 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423970 5102 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423978 5102 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423982 5102 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423987 5102 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423992 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.423997 5102 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424002 5102 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424015 5102 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424020 5102 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424025 5102 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424030 5102 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424035 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424039 5102 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424044 5102 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424049 5102 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424054 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424059 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424064 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424069 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424074 5102 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424080 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424085 5102 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424091 5102 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424098 5102 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424104 5102 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424110 5102 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424117 5102 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424122 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424128 5102 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424133 5102 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424138 5102 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424143 5102 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.424147 5102 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.424164 5102 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.435435 5102 server.go:491] "Kubelet version" kubeletVersion="v1.31.5"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.435492 5102 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438079 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438167 5102 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438180 5102 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438205 5102 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438217 5102 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438301 5102 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438317 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438748 5102 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438793 5102 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438813 5102 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.438832 5102 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439025 5102 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439047 5102 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439060 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439072 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439084 5102 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439097 5102 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439108 5102 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439121 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439134 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439146 5102 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439159 5102 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439171 5102 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439182 5102 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439193 5102 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439204 5102 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439220 5102 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439234 5102 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439246 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439258 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439272 5102 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439285 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439297 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439309 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439320 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439331 5102 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439356 5102 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439367 5102 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439382 5102 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439394 5102 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439405 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439415 5102 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439426 5102 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439436 5102 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439447 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439458 5102 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439468 5102 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439478 5102 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439489 5102 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439500 5102 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439510 5102 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439520 5102 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439531 5102 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439581 5102 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439596 5102 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439609 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439620 5102 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439632 5102 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439645 5102 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439656 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439669 5102 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439680 5102 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439691 5102 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439704 5102 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439713 5102 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439724 5102 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439733 5102 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439742 5102 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439750 5102 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439759 5102 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.439767 5102 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.439783 5102 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440064 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440081 5102 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440094 5102 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440106 5102 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440117 5102 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440126 5102 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440136 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440145 5102 feature_gate.go:330] unrecognized feature gate: Example
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440154 5102 feature_gate.go:330] unrecognized feature gate: NewOLM
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440163 5102 feature_gate.go:330] unrecognized feature gate: UpgradeStatus
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440175 5102 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440186 5102 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440200 5102 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440217 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440237 5102 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440250 5102 feature_gate.go:330] unrecognized feature gate: SignatureStores
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440262 5102 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440272 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440283 5102 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440292 5102 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440304 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440315 5102 feature_gate.go:330] unrecognized feature gate: ExternalOIDC
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440326 5102 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440338 5102 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440349 5102 feature_gate.go:330] unrecognized feature gate: HardwareSpeed
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440361 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440372 5102 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440385 5102 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440395 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440405 5102 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440415 5102 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440426 5102 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440437 5102 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440448 5102 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440457 5102 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440467 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440477 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440488 5102 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440498 5102 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440512 5102 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440523 5102 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440534 5102 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440619 5102 feature_gate.go:330] unrecognized feature gate: PlatformOperators
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440631 5102 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440642 5102 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440657 5102 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440670 5102 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release.
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440684 5102 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440696 5102 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440708 5102 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440720 5102 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440731 5102 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440742 5102 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440751 5102 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440761 5102 feature_gate.go:330] unrecognized feature gate: DNSNameResolver
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440770 5102 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440779 5102 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440788 5102 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440797 5102 feature_gate.go:330] unrecognized feature gate: OVNObservability
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440805 5102 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440813 5102 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440822 5102 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440830 5102 feature_gate.go:330] unrecognized feature gate: PinnedImages
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440839 5102 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440849 5102 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440858 5102 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440866 5102 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440875 5102 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440883 5102 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440892 5102 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.440900 5102 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.440914 5102 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.441692 5102 server.go:940] "Client rotation is on, will bootstrap in background"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.444667 5102 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.444759 5102 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.445307 5102 server.go:997] "Starting client certificate rotation"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.445337 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.445942 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-12 13:07:42.972074701 +0000 UTC
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.446118 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.451123 5102 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.452991 5102 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt"
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.453345 5102 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.468962 5102 log.go:25] "Validated CRI v1 runtime API"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.484050 5102 log.go:25] "Validated CRI v1 image API"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.486155 5102 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.489996 5102 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2026-01-23-06-45-03-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3]
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.490045 5102 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:41 fsType:tmpfs blockSize:0}]
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.510599 5102 manager.go:217] Machine: {Timestamp:2026-01-23 06:54:09.508347358 +0000 UTC m=+0.328696433 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2800000 MemoryCapacity:33654124544 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:01d84193-5d13-4a9b-819a-5818b02f0043 BootID:f39edb4b-853d-44ae-bcf5-b5b79110ef33 Filesystems:[{Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365408768 Type:vfs Inodes:821633 HasInodes:true} {Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:41 Capacity:1073741824 Type:vfs Inodes:4108169 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827060224 Type:vfs Inodes:4108169 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:42:17:dc Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:42:17:dc Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:eb:2a:59 Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:c1:5b:60 Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:a5:53:fc Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:14:be:81 Speed:-1 Mtu:1496} {Name:ens7.23 MacAddress:52:54:00:42:ef:4b Speed:-1 Mtu:1496} {Name:ens7.44 MacAddress:52:54:00:42:c3:95 Speed:-1 Mtu:1496} {Name:eth10 MacAddress:1a:d8:09:67:bd:00 Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:4e:f9:fd:08:f7:12 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654124544 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None}
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.510992 5102 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available.
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.511211 5102 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:}
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.511870 5102 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.512175 5102 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[]
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.512221 5102 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2}
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.512605 5102 topology_manager.go:138] "Creating topology manager with none policy"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.512626 5102 container_manager_linux.go:303] "Creating device plugin manager"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.512990 5102 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.513075 5102 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.513480 5102 state_mem.go:36] "Initialized new in-memory state store"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.513653 5102 server.go:1245] "Using root directory" path="/var/lib/kubelet"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.514756 5102 kubelet.go:418] "Attempting to sync node with API server"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.514790 5102 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.514844 5102 file.go:69] "Watching path" path="/etc/kubernetes/manifests"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.514866 5102 kubelet.go:324] "Adding apiserver pod source"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.514890 5102 apiserver.go:42] "Waiting for node sync before watching apiserver pods"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.517109 5102 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1"
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.517796 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.517867 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.517945 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError"
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.517947 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.523934 5102 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem".
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.524914 5102 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525616 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525641 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525649 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525655 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525666 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525672 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525679 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525691 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525700 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525709 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525719 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525726 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.525946 5102 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.526435 5102 server.go:1280] "Started kubelet"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.526943 5102 server.go:163] "Starting to listen" address="0.0.0.0" port=10250
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.527040 5102 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.526945 5102 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.527822 5102 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock"
Jan 23 06:54:09 crc systemd[1]: Started Kubernetes Kubelet.
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.528499 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.528556 5102 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.528784 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 20:16:39.961763892 +0000 UTC
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.528933 5102 volume_manager.go:287] "The desired_state_of_world populator starts"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.528962 5102 volume_manager.go:289] "Starting Kubelet Volume Manager"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.529025 5102 desired_state_of_world_populator.go:146] "Desired state populator starts to run"
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.530155 5102 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found"
Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.530488 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.530627 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError"
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.530815 5102 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.195:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188d49b025f5b3a8 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 06:54:09.526404008 +0000 UTC m=+0.346752983,LastTimestamp:2026-01-23 06:54:09.526404008 +0000 UTC m=+0.346752983,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}"
Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.531426 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="200ms"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539079 5102 factory.go:153] Registering CRI-O factory
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539132 5102 factory.go:221] Registration of the crio container factory successfully
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539232 5102 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539247 5102 factory.go:55] Registering systemd factory
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539270 5102 factory.go:221] Registration of the systemd container factory successfully
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539339 5102 factory.go:103] Registering Raw factory
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539414 5102 manager.go:1196] Started watching for new ooms in manager
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.539641 5102 server.go:460] "Adding debug handlers to kubelet server"
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.541741 5102 manager.go:319] Starting recovery of all containers
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553578 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553663 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553678 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553696 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553734 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553747 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553788 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553799 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext=""
Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553817 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28"
volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553833 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553847 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553866 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553880 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553898 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553910 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553921 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553934 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553950 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553963 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553977 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.553991 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554010 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554058 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554071 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554085 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554100 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554115 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554129 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554144 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554182 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554196 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" 
volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554207 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554220 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554232 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554245 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554258 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554270 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554289 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554302 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554325 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554342 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554355 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" 
volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554366 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554413 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554437 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554452 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554467 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554481 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.554497 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555817 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555837 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555881 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555897 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" 
volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555907 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555918 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555929 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555939 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555947 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555956 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555965 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555976 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555986 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.555996 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556005 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" 
volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556014 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556029 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556039 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556050 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556060 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556070 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556079 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556090 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556107 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556117 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556128 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" 
volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556163 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556176 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556186 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.556198 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560881 5102 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560923 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560937 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560948 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560962 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560974 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560983 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual 
state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.560993 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561003 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561016 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561026 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561036 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561047 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561058 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561068 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561080 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561089 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561101 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561111 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561122 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561137 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561148 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561162 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561172 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561184 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561193 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561210 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561222 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561308 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561336 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561346 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561358 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561372 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561385 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561400 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561416 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561428 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561453 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561463 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561476 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561487 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561497 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561508 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561518 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561529 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561602 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561613 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561621 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561631 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561644 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561656 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561665 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561675 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561703 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561715 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561725 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561736 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561745 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561755 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561768 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561779 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561808 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" 
volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561818 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561830 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561840 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561849 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561952 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561965 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.561991 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562016 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562058 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562067 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562079 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" 
volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562105 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562115 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562125 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562134 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562162 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562175 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562184 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562193 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562202 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562213 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562222 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" 
volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562233 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562270 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562309 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562330 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562343 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562388 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562402 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562420 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562432 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562471 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562485 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" 
volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562498 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562510 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562523 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562558 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562568 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562608 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562630 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562656 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562667 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562707 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562730 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562739 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562748 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562759 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562787 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562800 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562814 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562827 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562844 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562862 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562876 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562890 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" 
volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562947 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562970 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562980 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.562995 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563005 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563014 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563026 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563037 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563062 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563072 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563081 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" 
volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563090 5102 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563113 5102 reconstruct.go:97] "Volume reconstruction finished" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.563120 5102 reconciler.go:26] "Reconciler: start to sync state" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.565439 5102 manager.go:324] Recovery completed Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.580013 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.582177 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.582226 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.582238 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.585001 5102 cpu_manager.go:225] "Starting CPU manager" policy="none" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.585022 5102 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.585045 5102 state_mem.go:36] "Initialized new in-memory state store" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.593796 5102 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.596226 5102 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.596467 5102 status_manager.go:217] "Starting to sync pod status with apiserver" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.596751 5102 kubelet.go:2335] "Starting kubelet main sync loop" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.596624 5102 policy_none.go:49] "None policy: Start" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.597008 5102 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Jan 23 06:54:09 crc kubenswrapper[5102]: W0123 06:54:09.598419 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.598532 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.599243 5102 memory_manager.go:170] "Starting memorymanager" policy="None" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.599291 5102 state_mem.go:35] "Initializing new in-memory state store" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.630434 5102 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.660080 5102 manager.go:334] "Starting Device Plugin manager" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.661828 5102 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.661859 5102 server.go:79] "Starting device plugin registration server" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.662377 5102 eviction_manager.go:189] "Eviction manager: starting control loop" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.662413 5102 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.662861 5102 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.665245 5102 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.665287 5102 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.682498 5102 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.697464 5102 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc","openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 23 06:54:09 crc kubenswrapper[5102]: 
I0123 06:54:09.697581 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698417 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698450 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698458 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698574 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698951 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.698998 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699067 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699089 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699097 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699171 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699371 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699422 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699966 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.699992 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700000 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700049 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700087 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700088 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700117 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700307 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700376 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.700397 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701133 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701141 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701338 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701417 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701520 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701574 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.701978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.702103 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.702128 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.702430 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.702464 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.702475 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.703318 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.703349 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.703359 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.732479 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="400ms" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.763331 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.764380 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.764471 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.764490 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.764529 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765530 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.765612 5102 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.195:6443: connect: connection refused" node="crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765638 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765733 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765786 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765831 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765898 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765941 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765973 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.765999 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766023 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766072 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766101 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: 
\"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766146 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766181 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.766202 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867803 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867876 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867898 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867915 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867935 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.867952 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 
06:54:09.867995 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868014 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868010 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868070 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868086 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868077 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868146 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868207 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868245 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868036 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc 
kubenswrapper[5102]: I0123 06:54:09.868219 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868036 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868605 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.868495 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869015 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869049 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869069 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869091 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869109 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869321 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869374 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869328 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869408 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.869434 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.966423 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.968964 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.969031 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.969048 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:09 crc kubenswrapper[5102]: I0123 06:54:09.969089 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:09 crc kubenswrapper[5102]: E0123 06:54:09.970055 5102 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.195:6443: connect: connection refused" node="crc" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.030080 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.036014 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.057024 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd/etcd-crc" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.074153 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-e2b3c7684cf10e0b6099ef2d2ae086f492a5ae848afb524b69f49fe86ad89b7c WatchSource:0}: Error finding container e2b3c7684cf10e0b6099ef2d2ae086f492a5ae848afb524b69f49fe86ad89b7c: Status 404 returned error can't find the container with id e2b3c7684cf10e0b6099ef2d2ae086f492a5ae848afb524b69f49fe86ad89b7c Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.077710 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-a188d1c1b89cefaf9abdb31c5d12aed3f1b313fb9429ea17c015bf9d7eee9a5d WatchSource:0}: Error finding container a188d1c1b89cefaf9abdb31c5d12aed3f1b313fb9429ea17c015bf9d7eee9a5d: Status 404 returned error can't find the container with id a188d1c1b89cefaf9abdb31c5d12aed3f1b313fb9429ea17c015bf9d7eee9a5d Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.084734 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.088950 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.089744 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-f3204db422af420b3ecf89eb36d3e429ab946ef5443970e6cf36fa2b3ab5c0b4 WatchSource:0}: Error finding container f3204db422af420b3ecf89eb36d3e429ab946ef5443970e6cf36fa2b3ab5c0b4: Status 404 returned error can't find the container with id f3204db422af420b3ecf89eb36d3e429ab946ef5443970e6cf36fa2b3ab5c0b4 Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.107016 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-afe4a2131f4eef5155d5868058a6fe5c29eb6030632a8e0fdab7f7f7511a3a7c WatchSource:0}: Error finding container afe4a2131f4eef5155d5868058a6fe5c29eb6030632a8e0fdab7f7f7511a3a7c: Status 404 returned error can't find the container with id afe4a2131f4eef5155d5868058a6fe5c29eb6030632a8e0fdab7f7f7511a3a7c Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.111662 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-03bcf8e5539bf26845d658e7a6e5c2d86e119eea2610c1982a2f1295ec5f7d98 WatchSource:0}: Error finding container 03bcf8e5539bf26845d658e7a6e5c2d86e119eea2610c1982a2f1295ec5f7d98: Status 404 returned error can't find the container with id 03bcf8e5539bf26845d658e7a6e5c2d86e119eea2610c1982a2f1295ec5f7d98 Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.134148 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="800ms" Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.171025 5102 event.go:368] "Unable to 
write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.102.83.195:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.188d49b025f5b3a8 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 06:54:09.526404008 +0000 UTC m=+0.346752983,LastTimestamp:2026-01-23 06:54:09.526404008 +0000 UTC m=+0.346752983,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.370745 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.373380 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.373414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.373423 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.373446 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.373953 5102 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.195:6443: connect: connection refused" node="crc" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.454754 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.455159 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.528342 5102 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.529460 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 06:34:17.816109295 +0000 UTC Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.602254 5102 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62" exitCode=0 Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.602341 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.602419 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"e2b3c7684cf10e0b6099ef2d2ae086f492a5ae848afb524b69f49fe86ad89b7c"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.602508 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.603687 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.603718 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.603730 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.606934 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.606972 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"03bcf8e5539bf26845d658e7a6e5c2d86e119eea2610c1982a2f1295ec5f7d98"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.609870 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db" exitCode=0 Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.609923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.609940 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"afe4a2131f4eef5155d5868058a6fe5c29eb6030632a8e0fdab7f7f7511a3a7c"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.610049 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.610746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.610772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.610784 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.612167 5102 generic.go:334] "Generic (PLEG): 
container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="16991ef7c4e2e9798de7c856e9b62560281d8d0d75465f37c5d3c5106fa3d730" exitCode=0 Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.612220 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"16991ef7c4e2e9798de7c856e9b62560281d8d0d75465f37c5d3c5106fa3d730"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.612235 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"f3204db422af420b3ecf89eb36d3e429ab946ef5443970e6cf36fa2b3ab5c0b4"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.612313 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.612380 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613646 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613710 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613898 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.613945 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.615283 5102 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a" exitCode=0 Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.615347 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.615402 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"a188d1c1b89cefaf9abdb31c5d12aed3f1b313fb9429ea17c015bf9d7eee9a5d"} Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.615512 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.616531 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:10 crc kubenswrapper[5102]: I0123 06:54:10.616597 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:10 crc kubenswrapper[5102]: 
I0123 06:54:10.616613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.699957 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.700098 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.866186 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.866285 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError" Jan 23 06:54:10 crc kubenswrapper[5102]: W0123 06:54:10.919655 5102 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.919745 5102 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.102.83.195:6443: connect: connection refused" logger="UnhandledError" Jan 23 06:54:10 crc kubenswrapper[5102]: E0123 06:54:10.935783 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="1.6s" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.174614 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.178590 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.178668 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.178681 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.178720 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:11 crc kubenswrapper[5102]: E0123 06:54:11.179454 5102 
kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.102.83.195:6443: connect: connection refused" node="crc" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.529641 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 18:33:01.294278504 +0000 UTC Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.541783 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.622867 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.622923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.622938 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.622951 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.625797 5102 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7bc475c02281391ef25a992c289514b9c24c6ea71b69f4cb428f0630308ff146" exitCode=0 Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.625880 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7bc475c02281391ef25a992c289514b9c24c6ea71b69f4cb428f0630308ff146"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.626129 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.627334 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.627355 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.627363 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.628356 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.628415 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.628445 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.628625 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.631230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.631257 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.631266 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.639787 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.640712 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.641978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.642010 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.642019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.643454 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.643489 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.643504 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.643504 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b"} Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.644219 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.644248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:11 crc kubenswrapper[5102]: I0123 06:54:11.644260 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.530594 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 07:35:04.228863061 +0000 UTC Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.654472 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40"} Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.654612 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.661389 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.661475 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.661494 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.662342 5102 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="89b9e6e788415c0c5865856f48aec9d8de04cb80459caa44ea28a17d6bafff3c" exitCode=0 Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.662530 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.662736 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"89b9e6e788415c0c5865856f48aec9d8de04cb80459caa44ea28a17d6bafff3c"} Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.663150 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.663856 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.663928 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.663953 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.665275 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.665329 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.665350 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:12 crc 
kubenswrapper[5102]: I0123 06:54:12.780603 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.782194 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.782231 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.782243 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.782269 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:12 crc kubenswrapper[5102]: I0123 06:54:12.945305 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.518740 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.531787 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 11:01:20.468866436 +0000 UTC Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.669236 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"618174e47d627885e886c81a18b15e4835da14fc8fed85f53cdc0cd8292af605"} Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.669347 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.669353 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"6df7686715781f8eeefea267f33d4817317e649118c75b8a2e11c12f9af9de29"} Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.669377 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"dd8f61bfd88a3f0adba0736e9603cd8106a3477cf8423f1c85e96c2c638775e4"} Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.669404 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.670527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.670646 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:13 crc kubenswrapper[5102]: I0123 06:54:13.670667 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.532153 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 16:52:56.003937728 +0000 UTC Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.677864 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" 
event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"d0a5eba006fb1221f5114e9a5b64ead75afffd23e0dded1a09b1b3f253b89836"} Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.677903 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.677921 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.677962 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.677924 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"fab913161b4cedd88b93ad2870017d7635744ac3467324db9ffda2f294c35a65"} Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679468 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679492 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679516 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679495 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679557 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:14 crc kubenswrapper[5102]: I0123 06:54:14.679618 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.244800 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.245089 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.247141 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.247191 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.247203 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.533367 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-11 15:53:15.930772772 +0000 UTC Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.665963 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.675716 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 
06:54:15.680319 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.680425 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.681825 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.681885 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.681903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.682713 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.682767 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:15 crc kubenswrapper[5102]: I0123 06:54:15.682790 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.145392 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.217664 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.527729 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.534317 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 08:24:06.121407429 +0000 UTC Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.683221 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.683327 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685015 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685078 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685095 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.685850 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.738137 
5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.738660 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.740189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.740233 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:16 crc kubenswrapper[5102]: I0123 06:54:16.740248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:17 crc kubenswrapper[5102]: I0123 06:54:17.535528 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-24 04:24:46.410757642 +0000 UTC Jan 23 06:54:17 crc kubenswrapper[5102]: I0123 06:54:17.686699 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:17 crc kubenswrapper[5102]: I0123 06:54:17.687892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:17 crc kubenswrapper[5102]: I0123 06:54:17.687936 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:17 crc kubenswrapper[5102]: I0123 06:54:17.687952 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.245614 5102 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.246014 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.371718 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.372372 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.374100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.374153 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.374170 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:18 crc kubenswrapper[5102]: I0123 06:54:18.537631 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 
+0000 UTC, rotation deadline is 2026-01-08 05:44:52.132570969 +0000 UTC Jan 23 06:54:19 crc kubenswrapper[5102]: I0123 06:54:19.538625 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 03:38:36.16425803 +0000 UTC Jan 23 06:54:19 crc kubenswrapper[5102]: E0123 06:54:19.683653 5102 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.294595 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.294949 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.296630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.296695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.296720 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:20 crc kubenswrapper[5102]: I0123 06:54:20.539257 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 18:25:13.656765149 +0000 UTC Jan 23 06:54:21 crc kubenswrapper[5102]: I0123 06:54:21.084626 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 23 06:54:21 crc kubenswrapper[5102]: I0123 06:54:21.084723 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 23 06:54:21 crc kubenswrapper[5102]: I0123 06:54:21.528766 5102 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": net/http: TLS handshake timeout Jan 23 06:54:21 crc kubenswrapper[5102]: I0123 06:54:21.540158 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 23:32:26.80285684 +0000 UTC Jan 23 06:54:21 crc kubenswrapper[5102]: E0123 06:54:21.543394 5102 certificate_manager.go:562] "Unhandled Error" err="kubernetes.io/kube-apiserver-client-kubelet: Failed while requesting a signed certificate from the control plane: cannot create certificate signing request: Post \"https://api-int.crc.testing:6443/apis/certificates.k8s.io/v1/certificatesigningrequests\": net/http: TLS handshake timeout" logger="UnhandledError" Jan 23 06:54:22 crc kubenswrapper[5102]: I0123 06:54:22.094406 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure 
output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 23 06:54:22 crc kubenswrapper[5102]: I0123 06:54:22.094479 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 23 06:54:22 crc kubenswrapper[5102]: I0123 06:54:22.103154 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Jan 23 06:54:22 crc kubenswrapper[5102]: I0123 06:54:22.103252 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Jan 23 06:54:22 crc kubenswrapper[5102]: I0123 06:54:22.540674 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 04:22:13.642986793 +0000 UTC Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.526575 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.526876 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.528639 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.528711 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.528740 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.532332 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.541424 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 15:44:11.518322316 +0000 UTC Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.703565 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.705282 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.705344 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:23 crc kubenswrapper[5102]: I0123 06:54:23.705363 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientPID" Jan 23 06:54:24 crc kubenswrapper[5102]: I0123 06:54:24.542609 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 06:15:18.987696471 +0000 UTC Jan 23 06:54:25 crc kubenswrapper[5102]: I0123 06:54:25.543072 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 01:46:07.127146523 +0000 UTC Jan 23 06:54:25 crc kubenswrapper[5102]: I0123 06:54:25.785405 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Rotating certificates Jan 23 06:54:25 crc kubenswrapper[5102]: I0123 06:54:25.804488 5102 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146 Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.153405 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.153640 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.155119 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.155172 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.155190 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.543500 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-07 13:13:31.921363475 +0000 UTC Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.563524 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.563893 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.565620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.565696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.565713 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.587440 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.712846 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.714690 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:26 crc kubenswrapper[5102]: I0123 06:54:26.714779 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:26 crc 
kubenswrapper[5102]: I0123 06:54:26.714802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.092755 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="3.2s" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.104668 5102 trace.go:236] Trace[336070992]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 06:54:12.963) (total time: 14141ms): Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[336070992]: ---"Objects listed" error: 14141ms (06:54:27.104) Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[336070992]: [14.141510534s] [14.141510534s] END Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.104725 5102 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.104792 5102 trace.go:236] Trace[1056731382]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 06:54:13.294) (total time: 13810ms): Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1056731382]: ---"Objects listed" error: 13809ms (06:54:27.104) Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1056731382]: [13.81006295s] [13.81006295s] END Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.104843 5102 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.106963 5102 trace.go:236] Trace[1704368416]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 06:54:12.544) (total time: 14562ms): Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1704368416]: ---"Objects listed" error: 14562ms (06:54:27.106) Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1704368416]: [14.562359395s] [14.562359395s] END Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.107003 5102 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.107859 5102 trace.go:236] Trace[1727124812]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (23-Jan-2026 06:54:12.514) (total time: 14592ms): Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1727124812]: ---"Objects listed" error: 14592ms (06:54:27.107) Jan 23 06:54:27 crc kubenswrapper[5102]: Trace[1727124812]: [14.59297918s] [14.59297918s] END Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.107903 5102 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.117777 5102 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.123142 5102 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.432428 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:53548->192.168.126.11:17697: 
read: connection reset by peer" start-of-body= Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.432528 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:53548->192.168.126.11:17697: read: connection reset by peer" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.433168 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.433224 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.433606 5102 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.433695 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.508647 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.512970 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.525301 5102 apiserver.go:52] "Watching apiserver" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.530325 5102 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.530656 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb","openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf"] Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531119 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531168 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531131 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531271 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.531275 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.531342 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531444 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.531494 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.531083 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.535649 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.535889 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.536029 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.541312 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.541407 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.541992 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.542082 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.544239 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 07:06:16.444822336 +0000 UTC Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.544477 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.544617 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.569633 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.582416 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.594113 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.604640 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.613734 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.623803 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.630511 5102 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.635041 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.649671 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.664159 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.675510 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.686443 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-po
d-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.718286 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.721171 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40" exitCode=255 Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.721303 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40"} Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.726363 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod 
\"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.726920 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.727172 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.727204 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.727444 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.727755 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.728171 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.728398 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.728648 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.729407 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 06:54:27 crc 
kubenswrapper[5102]: I0123 06:54:27.729675 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.730047 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.730271 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.730585 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.730938 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.731312 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.731931 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.732219 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.732461 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.729097 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.729309 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.730046 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.731532 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.733118 5102 kubelet.go:1929] "Failed creating a mirror pod for" err="pods \"kube-controller-manager-crc\" already exists" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.734685 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.734834 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.732725 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735142 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735199 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735260 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735312 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735435 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735359 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735600 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735693 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735759 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735767 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "auth-proxy-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735800 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735849 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735899 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735936 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.735978 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736018 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736094 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736137 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736179 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736225 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736272 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736314 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736361 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736396 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736438 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736202 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736642 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736795 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.736910 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.737283 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.737306 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.737840 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.737935 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.738378 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.740715 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.741126 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.741245 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.741965 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.742122 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.742402 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.742861 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.742497 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.743070 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). 
InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.743355 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.744081 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.745968 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.745975 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746310 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746487 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746517 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746593 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746664 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746720 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746782 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746800 5102 scope.go:117] "RemoveContainer" containerID="2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746841 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746896 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746962 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747016 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747003 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747076 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747128 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747327 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747395 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747448 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747514 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747604 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747661 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747723 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747772 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747782 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" 
(UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747917 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747941 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.746513 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747969 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747998 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748059 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748092 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748124 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748159 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748189 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748212 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748236 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748263 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748288 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748316 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748348 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748370 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748391 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748416 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748448 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748470 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748505 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748534 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748573 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748595 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748621 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748647 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748666 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748691 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748712 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 
06:54:27.748733 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748756 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748777 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748802 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748823 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748856 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748879 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748898 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748923 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749567 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: 
\"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749663 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749716 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749883 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751673 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751748 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751810 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751868 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751927 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755226 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755359 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" 
(UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755765 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755866 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755938 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755992 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756081 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756695 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747072 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747097 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747174 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747340 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757469 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747791 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.747854 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748464 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749196 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749293 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749302 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749567 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.750635 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.749906 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751364 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.750677 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.751855 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.752139 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.752269 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). 
InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.753637 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.754247 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.754361 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755054 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755140 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755825 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755887 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756267 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757863 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.758103 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.759653 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761309 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761403 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761447 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761482 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756294 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761517 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761576 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761611 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761647 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761680 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761711 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761746 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761778 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761813 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761848 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod 
\"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761884 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761916 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761949 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761979 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762013 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762041 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762148 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762186 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762221 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762259 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: 
\"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762296 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762372 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762406 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762434 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762466 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762499 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762529 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762611 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762643 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762675 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762706 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762736 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762770 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762808 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762842 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762876 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762909 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762941 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762977 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763006 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763037 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763072 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763105 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763161 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763193 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763228 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763257 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763498 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763559 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763604 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763637 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763674 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763708 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763733 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763762 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763786 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763815 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763847 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763880 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763996 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume 
started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764065 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764094 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764117 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764140 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764164 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764189 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764249 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764273 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764338 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 
06:54:27.764363 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764385 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764410 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764469 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764531 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764593 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764624 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764652 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " 
pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764677 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764703 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764731 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764763 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764799 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764833 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764867 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764903 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764996 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" 
(UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765019 5102 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765040 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765058 5102 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765075 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765095 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765116 5102 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765134 5102 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765151 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765169 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765186 5102 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765201 5102 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765217 5102 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765235 5102 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" 
(UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765253 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765270 5102 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765287 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765304 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765321 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765338 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765355 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765374 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765388 5102 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765401 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765414 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765428 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765441 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: 
\"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765457 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765470 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765481 5102 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765494 5102 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765508 5102 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765522 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765535 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765578 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765596 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765612 5102 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765628 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765646 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.766191 5102 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: 
\"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769649 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769704 5102 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769729 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769755 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769777 5102 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769799 5102 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769823 5102 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769845 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769865 5102 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769889 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769911 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769940 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769962 5102 
reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769983 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770005 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770026 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770047 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770118 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770140 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770164 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770185 5102 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770205 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.755908 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756302 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.748420 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.754610 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756662 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756708 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757054 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.756267 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757228 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757437 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.757902 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.759412 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.759456 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.760146 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761055 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761066 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761203 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761968 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). 
InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.761989 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762016 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762252 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.762939 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763074 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763907 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763748 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763930 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). 
InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764337 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.763734 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.770305 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764910 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771137 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771214 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771277 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771361 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771875 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.771928 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.772411 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.772866 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.772910 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.772578 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.773307 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.773557 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765238 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.773636 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.773876 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.764663 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765488 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.774216 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.766213 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.768104 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775387 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.768457 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775441 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.769527 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775651 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775651 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775749 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775783 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775844 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.775991 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.776210 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.776329 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.776718 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.776810 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.776977 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777017 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777034 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777115 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777245 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777436 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777456 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777585 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777717 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777871 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777924 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777923 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.777966 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.778125 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.778189 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.778368 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.778722 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.778749 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779038 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779066 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779093 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779179 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779213 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.779883 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.780132 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.780316 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.765797 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.780401 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.780790 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.780956 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.781339 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.781700 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.782396 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.782840 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.783067 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.783272 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.783622 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "kube-api-access-9xfj7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.783919 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.784284 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.785093 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.785217 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.785828 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.786078 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.786298 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.786795 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.787023 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.789536 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.790454 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.791033 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.791728 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.791898 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:28.286372411 +0000 UTC m=+19.106721616 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.787776 5102 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. 
Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.792308 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.792332 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.792347 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.792362 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:28.292340284 +0000 UTC m=+19.112689479 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.786300 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.792394 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:28.292381986 +0000 UTC m=+19.112730971 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.796168 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.797143 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:28.29710583 +0000 UTC m=+19.117455025 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.798413 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.798948 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.799287 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\
\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.800259 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.801117 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.801414 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.802445 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.804633 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.805400 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.806584 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.812147 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.812192 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.812216 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:27 crc kubenswrapper[5102]: E0123 06:54:27.812288 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:28.312262086 +0000 UTC m=+19.132611101 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.812843 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.813282 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.821184 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.821721 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.821785 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.822299 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.824061 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.825532 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.826563 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.829041 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.829149 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.829169 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.829261 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.829331 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.830181 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.830228 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.830346 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.838885 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.840705 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.841459 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.852693 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.855933 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.864594 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871340 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871392 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871482 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: 
\"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871497 5102 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871511 5102 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871524 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871559 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871578 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871592 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871605 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871618 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871631 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871643 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871654 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871666 5102 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871676 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" 
DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871687 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871700 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871713 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871724 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871736 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871748 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871761 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871774 5102 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871786 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871798 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871811 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871822 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871835 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: 
\"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871847 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871858 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871869 5102 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871884 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871895 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871905 5102 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871917 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871928 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871939 5102 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871951 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871962 5102 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872052 5102 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872065 5102 reconciler_common.go:293] "Volume 
detached for volume \"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872077 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872087 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872098 5102 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872109 5102 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872119 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872129 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872139 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872151 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872162 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872175 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872186 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872197 5102 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872208 5102 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: 
\"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872219 5102 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872230 5102 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872242 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872252 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872262 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872273 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872284 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872298 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872310 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872320 5102 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872331 5102 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872341 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872352 5102 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: 
\"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872364 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872376 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872387 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872400 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872411 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872421 5102 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872432 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872452 5102 reconciler_common.go:293] "Volume detached for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872463 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872475 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872485 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872497 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872507 5102 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872518 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872532 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872531 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872568 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872583 5102 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872594 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.871857 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872631 5102 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872655 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872666 5102 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872679 5102 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872695 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 
23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872713 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872728 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872743 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872754 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872764 5102 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872773 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872781 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872807 5102 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872819 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872829 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872838 5102 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872847 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872857 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on 
node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872867 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872877 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872886 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872897 5102 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872908 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872918 5102 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872932 5102 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872941 5102 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872951 5102 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872962 5102 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872972 5102 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872981 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.872990 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: 
I0123 06:54:27.873001 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873011 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873023 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873033 5102 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873042 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873052 5102 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873062 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873072 5102 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873081 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873091 5102 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873101 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873111 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873121 5102 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873132 
5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873141 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873152 5102 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: I0123 06:54:27.873162 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:54:27 crc kubenswrapper[5102]: W0123 06:54:27.874056 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-2e98e82bb1f736cc6abc4776ff63f5a5ed3da88a1c5e2247f9d57e9cc56b00d5 WatchSource:0}: Error finding container 2e98e82bb1f736cc6abc4776ff63f5a5ed3da88a1c5e2247f9d57e9cc56b00d5: Status 404 returned error can't find the container with id 2e98e82bb1f736cc6abc4776ff63f5a5ed3da88a1c5e2247f9d57e9cc56b00d5 Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.162482 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.168916 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Jan 23 06:54:28 crc kubenswrapper[5102]: W0123 06:54:28.173592 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-76a20965fad571a5df146605ba93964202215141de54339f6a84b3b38cc15bc9 WatchSource:0}: Error finding container 76a20965fad571a5df146605ba93964202215141de54339f6a84b3b38cc15bc9: Status 404 returned error can't find the container with id 76a20965fad571a5df146605ba93964202215141de54339f6a84b3b38cc15bc9 Jan 23 06:54:28 crc kubenswrapper[5102]: W0123 06:54:28.188338 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-c97a1b478433ec1338a5aab4f64b8c78ab114e2196fe1c21cfae1e35f258f5aa WatchSource:0}: Error finding container c97a1b478433ec1338a5aab4f64b8c78ab114e2196fe1c21cfae1e35f258f5aa: Status 404 returned error can't find the container with id c97a1b478433ec1338a5aab4f64b8c78ab114e2196fe1c21cfae1e35f258f5aa Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.382027 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382147 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:29.382116247 +0000 UTC m=+20.202465232 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.382354 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382566 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.382532 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382589 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382659 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.382632 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382706 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:29.382695355 +0000 UTC m=+20.203044340 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382626 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.382741 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382833 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.382870 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:29.38282918 +0000 UTC m=+20.203178185 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.383002 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:29.382971034 +0000 UTC m=+20.203320059 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.383233 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.384523 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.384735 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:28 crc kubenswrapper[5102]: E0123 06:54:28.385170 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:29.38514643 +0000 UTC m=+20.205495446 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.545144 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 03:49:53.867117707 +0000 UTC Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.725130 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"c97a1b478433ec1338a5aab4f64b8c78ab114e2196fe1c21cfae1e35f258f5aa"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.727625 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.727747 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"76a20965fad571a5df146605ba93964202215141de54339f6a84b3b38cc15bc9"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.729690 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" 
event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.729728 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.729741 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"2e98e82bb1f736cc6abc4776ff63f5a5ed3da88a1c5e2247f9d57e9cc56b00d5"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.740719 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.748580 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e"} Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.748640 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.800876 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:28Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.819054 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:28Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.837089 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:28Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.853937 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23
T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:28Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:28 crc kubenswrapper[5102]: I0123 06:54:28.873208 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:28Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.009288 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.134742 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.154310 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.166715 5102 status_manager.go:875] "Failed to update status for 
pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.177914 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.188736 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.203831 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.225900 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-api
server-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.240056 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.258723 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.274135 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.392292 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.392407 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.392450 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.392481 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392533 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:31.392500195 +0000 UTC m=+22.212849210 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392605 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.392622 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392665 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:31.39264822 +0000 UTC m=+22.212997205 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392792 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392816 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392837 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
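The nestedpendingoperations entries above show the kubelet's per-volume retry backoff: after a mount or unmount fails, the operation is locked out until a deadline ("No retries permitted until ..."), and the wait (durationBeforeRetry, 2s here) grows on repeated failures up to a cap. A minimal Go sketch of that doubling-backoff pattern follows; the constants and names are illustrative assumptions, not the kubelet's actual implementation.

// backoff_sketch.go - illustrative doubling backoff, assumed constants.
package main

import (
	"fmt"
	"time"
)

const (
	initialBackoff = 500 * time.Millisecond // assumed starting delay
	maxBackoff     = 2 * time.Minute        // assumed cap for the sketch
)

// nextBackoff doubles the previous delay and clamps it at maxBackoff.
func nextBackoff(prev time.Duration) time.Duration {
	if prev <= 0 {
		return initialBackoff
	}
	next := prev * 2
	if next > maxBackoff {
		next = maxBackoff
	}
	return next
}

func main() {
	delay := time.Duration(0)
	now := time.Now()
	for attempt := 1; attempt <= 6; attempt++ {
		delay = nextBackoff(delay)
		// Mirrors the shape of the log line: a lockout deadline plus the delay.
		fmt.Printf("attempt %d failed; no retries permitted until %s (durationBeforeRetry %s)\n",
			attempt, now.Add(delay).Format(time.RFC3339), delay)
	}
}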
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392894 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:31.392875467 +0000 UTC m=+22.213224482 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392979 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.392997 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.393012 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.393072 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:31.393058523 +0000 UTC m=+22.213407538 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.393150 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.393200 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:31.393182557 +0000 UTC m=+22.213531552 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.548581 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-07 00:22:51.750585122 +0000 UTC
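The certificate_manager entry above reports both the kubelet-serving certificate's expiration (2026-02-24) and a rotation deadline (2025-12-07) that falls well before it: rotation is scheduled at a jittered point inside the validity window so a fleet of nodes does not rotate in lockstep. A hedged Go sketch of that scheduling idea follows; the 70-90% jitter window and the NotBefore date are assumptions for illustration, with only the expiration taken from the log.

// rotation_sketch.go - illustrative jittered rotation deadline.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// rotationDeadline returns a deadline at a uniformly random point in the
// [70%, 90%) portion of the certificate's validity window (assumed window).
func rotationDeadline(notBefore, notAfter time.Time) time.Time {
	lifetime := notAfter.Sub(notBefore)
	fraction := 0.7 + 0.2*rand.Float64()
	return notBefore.Add(time.Duration(fraction * float64(lifetime)))
}

func main() {
	// NotBefore is an illustrative assumption; NotAfter matches the log's expiry.
	notBefore := time.Date(2025, time.August, 24, 5, 53, 3, 0, time.UTC)
	notAfter := time.Date(2026, time.February, 24, 5, 53, 3, 0, time.UTC)
	deadline := rotationDeadline(notBefore, notAfter)
	fmt.Printf("certificate expiration is %s, rotation deadline is %s\n",
		notAfter, deadline)
}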
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.597171 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.597453 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.597202 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.597731 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:54:29 crc kubenswrapper[5102]: E0123 06:54:29.597611 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.601926 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.602414 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.603190 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.603878 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.604471 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.605032 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.605722 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.606365 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.606983 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.607497 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.607987 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.608652 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.609194 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes"
Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.609784 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292"
path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.610303 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.611994 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.613375 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.613982 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.614394 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.615351 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.615993 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.616820 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.617372 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.617821 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.618797 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.619247 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" 
path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.620232 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.620861 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.621694 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.622268 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.623235 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.623719 5102 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.623820 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.625808 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.625899 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.626321 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.626765 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.628273 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.629750 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.630415 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.631443 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.632125 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.633195 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.634256 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" 
path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.635265 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.635843 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.636638 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.637152 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.638088 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.638796 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.639630 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.640076 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.640907 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.641398 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.641568 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.641970 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.642841 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.654893 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.666667 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.681967 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.702815 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:29 crc kubenswrapper[5102]: I0123 06:54:29.719074 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-api
server-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.318768 5102 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.321182 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.321252 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.321271 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.321426 5102 kubelet_node_status.go:76] "Attempting to register node" node="crc" Jan 23 06:54:30 crc 
kubenswrapper[5102]: I0123 06:54:30.334408 5102 kubelet_node_status.go:115] "Node was previously registered" node="crc" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.334610 5102 kubelet_node_status.go:79] "Successfully registered node" node="crc" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.336197 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.336246 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.336256 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.336283 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.336297 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.360418 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:30Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.364589 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.364655 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.364674 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.364702 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.364718 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.381327 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:30Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.401236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.401332 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
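Every one of the patch failures in this stretch has the same root cause, stated at the end of the err string: the serving certificate of the node.network-node-identity.openshift.io webhook expired on 2025-08-24T17:21:41Z, while the node clock reads 2026-01-23, so each TLS handshake to https://127.0.0.1:9743 fails before the status patch is ever admitted. A minimal Go sketch of the same x509 validity check the TLS stack performs (the PEM path is illustrative):

```go
// Minimal sketch: decode a PEM certificate and compare its validity
// window against the current time, mirroring the x509 check that the
// kubelet's webhook call fails above. The file path is illustrative.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"log"
	"os"
	"time"
)

func main() {
	pemBytes, err := os.ReadFile("/tmp/webhook-serving-cert.pem") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(pemBytes)
	if block == nil {
		log.Fatal("no PEM block found")
	}
	cert, err := x509.ParseCertificate(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	now := time.Now()
	fmt.Printf("NotBefore=%s NotAfter=%s\n", cert.NotBefore, cert.NotAfter)
	switch {
	case now.After(cert.NotAfter):
		// This is the state the log reports: current time is after NotAfter.
		fmt.Println("certificate has expired")
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}
```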
event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.401355 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.401415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.401435 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.505345 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:30Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.511453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.511497 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.511506 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.511524 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.511550 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.524924 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:30Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.529432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.529510 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.529521 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.529556 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.529567 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.543984 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:30Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:30Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:30 crc kubenswrapper[5102]: E0123 06:54:30.544190 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.546263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
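The kubelet bounds these status pushes: after a fixed number of consecutive patch failures (five attempts are visible above) it logs "update node status exceeds retry count" and waits for the next sync period. A sketch of that bounded-retry shape, with illustrative names and constants rather than the kubelet's actual identifiers:

```go
// Minimal sketch of the bounded retry pattern behind the
// "update node status exceeds retry count" line above. The constant
// and function names are illustrative, not the kubelet's actual code.
package main

import (
	"errors"
	"fmt"
)

const nodeStatusUpdateRetry = 5 // illustrative; mirrors the five failed patches above

func patchNodeStatus() error {
	// Stand-in for the PATCH that the admission webhook rejects.
	return errors.New("failed calling webhook: tls: certificate has expired")
}

func updateNodeStatus() error {
	for i := 0; i < nodeStatusUpdateRetry; i++ {
		if err := patchNodeStatus(); err != nil {
			fmt.Printf("Error updating node status, will retry: %v\n", err)
			continue
		}
		return nil
	}
	return errors.New("update node status exceeds retry count")
}

func main() {
	if err := updateNodeStatus(); err != nil {
		fmt.Println("Unable to update node status:", err)
	}
}
```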
event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.546301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.546316 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.546350 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.546368 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.550169 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 10:29:17.908217843 +0000 UTC Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.649722 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.649803 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.649820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.649844 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.649865 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.752819 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.752868 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.752885 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.752909 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.752927 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.855942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.856003 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.856016 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.856036 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:30 crc kubenswrapper[5102]: I0123 06:54:30.856051 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:30Z","lastTransitionTime":"2026-01-23T06:54:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.083710 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.083766 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.083779 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.083799 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.083812 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
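Each of the setters.go entries above rewrites the node's Ready condition to the same False/KubeletNotReady value because no CNI configuration has appeared in /etc/kubernetes/cni/net.d/. A sketch of that condition's JSON shape, using local stand-in types rather than the real k8s.io/api structs:

```go
// Minimal sketch of the Ready=False condition the kubelet sets above.
// The local struct mirrors the fields shown in the log; it is an
// illustration of the JSON shape, not the real k8s.io/api types.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type nodeCondition struct {
	Type               string `json:"type"`
	Status             string `json:"status"`
	LastHeartbeatTime  string `json:"lastHeartbeatTime"`
	LastTransitionTime string `json:"lastTransitionTime"`
	Reason             string `json:"reason"`
	Message            string `json:"message"`
}

func main() {
	now := time.Now().UTC().Format(time.RFC3339)
	cond := nodeCondition{
		Type:               "Ready",
		Status:             "False",
		LastHeartbeatTime:  now,
		LastTransitionTime: now,
		Reason:             "KubeletNotReady",
		Message: "container runtime network not ready: NetworkReady=false " +
			"reason:NetworkPluginNotReady message:Network plugin returns error: " +
			"no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?",
	}
	out, _ := json.Marshal(cond)
	fmt.Println(string(out))
}
```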
Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.325666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.325713 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.325725 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.325743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.325755 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.424176 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.424267 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.424293 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.424311 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.424331 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424473 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424490 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424500 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424635 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:35.424599178 +0000 UTC m=+26.244948173 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424642 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424667 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424695 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:35.42468134 +0000 UTC m=+26.245030315 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424697 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424722 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424737 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:35.424721482 +0000 UTC m=+26.245070477 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424768 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:35.424758923 +0000 UTC m=+26.245107918 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424822 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.424861 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:35.424851626 +0000 UTC m=+26.245200611 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.428280 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.428342 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.428361 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.428386 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.428404 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.531453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.531493 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.531503 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.531517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.531527 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.550893 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 06:00:06.997031058 +0000 UTC Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.597467 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.597561 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.597632 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.597712 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.597878 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:31 crc kubenswrapper[5102]: E0123 06:54:31.597944 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.698035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.698069 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.698080 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.698097 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.698109 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.801406 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.801445 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.801454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.801471 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.801482 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.903775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.903815 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.903830 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.903845 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.903856 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:31Z","lastTransitionTime":"2026-01-23T06:54:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:31 crc kubenswrapper[5102]: I0123 06:54:31.985931 5102 csr.go:261] certificate signing request csr-hggqk is approved, waiting to be issued Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.009759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.009825 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.009837 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.009861 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.009874 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.033394 5102 csr.go:257] certificate signing request csr-hggqk is issued Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.112943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.112987 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.112998 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.113016 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.113026 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.215453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.215499 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.215510 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.215527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.215557 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.318133 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.318183 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.318197 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.318221 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.318235 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.421143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.421194 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.421205 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.421223 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.421238 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.457520 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/node-resolver-pht4g"] Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.458213 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.460445 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.460710 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.460836 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.470951 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-con
fig\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.488313 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.498378 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.512585 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.523248 5102 kubelet_node_status.go:724] "Recording event message 
for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.523283 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.523293 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.523311 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.523323 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.528504 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.541227 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.551626 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.551900 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-14 09:34:03.878844212 +0000 UTC Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.565307 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.571127 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gzsrk\" (UniqueName: \"kubernetes.io/projected/7fdff815-7d6c-4f01-946d-bc444475aa15-kube-api-access-gzsrk\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.571192 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7fdff815-7d6c-4f01-946d-bc444475aa15-hosts-file\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.581285 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container 
could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.626129 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.626185 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.626196 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.626217 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.626229 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.672191 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7fdff815-7d6c-4f01-946d-bc444475aa15-hosts-file\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.672287 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gzsrk\" (UniqueName: \"kubernetes.io/projected/7fdff815-7d6c-4f01-946d-bc444475aa15-kube-api-access-gzsrk\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.672344 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/7fdff815-7d6c-4f01-946d-bc444475aa15-hosts-file\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.692471 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gzsrk\" (UniqueName: \"kubernetes.io/projected/7fdff815-7d6c-4f01-946d-bc444475aa15-kube-api-access-gzsrk\") pod \"node-resolver-pht4g\" (UID: \"7fdff815-7d6c-4f01-946d-bc444475aa15\") " pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.729266 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.729357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.729386 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.729421 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.729444 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.759813 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.774961 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.775987 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-pht4g" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.800350 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: W0123 06:54:32.801613 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7fdff815_7d6c_4f01_946d_bc444475aa15.slice/crio-9be41d277c31287548454f52a77a4147c62518e050880f3dc387da9e2fd7d2cd WatchSource:0}: Error finding container 9be41d277c31287548454f52a77a4147c62518e050880f3dc387da9e2fd7d2cd: Status 404 returned error can't find the container with id 9be41d277c31287548454f52a77a4147c62518e050880f3dc387da9e2fd7d2cd Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.832301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.832885 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.832906 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.832931 5102 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.832948 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.835302 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.856977 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-5vv4l"] Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.857286 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-fbvf7"] Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.857900 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.858282 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.861197 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.861366 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.861470 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.861933 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.863936 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.865848 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-vnmgh"] Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.866817 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.867073 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.867107 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.879320 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.879559 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.879680 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.879895 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.880427 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.893452 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.942967 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.943007 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.943020 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.943042 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.943061 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:32Z","lastTransitionTime":"2026-01-23T06:54:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975239 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-socket-dir-parent\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975336 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-netns\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975359 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-os-release\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975392 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/04f943d6-91c5-4493-b310-de0b8ef7966e-mcd-auth-proxy-config\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975410 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-bin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975427 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04f943d6-91c5-4493-b310-de0b8ef7966e-rootfs\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975443 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-os-release\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975462 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfws4\" (UniqueName: \"kubernetes.io/projected/c1446a26-ae38-40f3-a313-8604f5e98285-kube-api-access-sfws4\") pod \"multus-5vv4l\" (UID: 
\"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975481 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-multus\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975497 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-conf-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975518 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kv7qq\" (UniqueName: \"kubernetes.io/projected/04f943d6-91c5-4493-b310-de0b8ef7966e-kube-api-access-kv7qq\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975555 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cnibin\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975578 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975608 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-hostroot\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975827 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975850 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975871 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: 
\"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-cnibin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975886 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-binary-copy\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975902 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-cni-binary-copy\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975979 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-multus-daemon-config\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.975997 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-multus-certs\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976014 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2tss9\" (UniqueName: \"kubernetes.io/projected/b0672e4f-cd9f-47e6-8909-43e33fb9c254-kube-api-access-2tss9\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976039 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-k8s-cni-cncf-io\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976054 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-kubelet\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976071 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-etc-kubernetes\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976086 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04f943d6-91c5-4493-b310-de0b8ef7966e-proxy-tls\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976127 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-system-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:32 crc kubenswrapper[5102]: I0123 06:54:32.976143 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-system-cni-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:32.999855 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-2
3T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.038079 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2027-01-23 06:49:32 +0000 UTC, rotation deadline is 2026-12-06 05:13:41.865782049 +0000 UTC Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.038199 5102 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 7606h19m8.827586748s for next certificate rotation Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.055786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.055828 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.055839 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.055858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.055871 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.058568 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077264 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2tss9\" (UniqueName: \"kubernetes.io/projected/b0672e4f-cd9f-47e6-8909-43e33fb9c254-kube-api-access-2tss9\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077330 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-multus-certs\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077364 5102 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04f943d6-91c5-4493-b310-de0b8ef7966e-proxy-tls\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077395 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-k8s-cni-cncf-io\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077414 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-kubelet\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077434 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-etc-kubernetes\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077451 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-system-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077470 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-system-cni-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077502 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-socket-dir-parent\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077523 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-netns\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077591 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-os-release\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/04f943d6-91c5-4493-b310-de0b8ef7966e-mcd-auth-proxy-config\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077652 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-bin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077675 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04f943d6-91c5-4493-b310-de0b8ef7966e-rootfs\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077699 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-os-release\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077725 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfws4\" (UniqueName: \"kubernetes.io/projected/c1446a26-ae38-40f3-a313-8604f5e98285-kube-api-access-sfws4\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077745 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-multus\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077763 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-conf-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077778 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kv7qq\" (UniqueName: \"kubernetes.io/projected/04f943d6-91c5-4493-b310-de0b8ef7966e-kube-api-access-kv7qq\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077793 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cnibin\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077808 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: 
\"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077834 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077858 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-hostroot\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077872 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077890 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-cnibin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077905 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-binary-copy\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077920 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-cni-binary-copy\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.077934 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-multus-daemon-config\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.078724 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-multus-daemon-config\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079066 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-multus-certs\") pod \"multus-5vv4l\" (UID: 
\"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079803 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-os-release\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079844 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-k8s-cni-cncf-io\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079867 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-kubelet\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079889 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-etc-kubernetes\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079923 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-system-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079946 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-system-cni-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079979 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-socket-dir-parent\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.079999 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-run-netns\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.080030 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-os-release\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.080579 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/04f943d6-91c5-4493-b310-de0b8ef7966e-mcd-auth-proxy-config\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.080624 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/04f943d6-91c5-4493-b310-de0b8ef7966e-rootfs\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.080701 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-bin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.080813 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-conf-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081080 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-tuning-conf-dir\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081246 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-host-var-lib-cni-multus\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081294 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cnibin\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081579 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-multus-cni-dir\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081632 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-hostroot\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081936 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-sysctl-allowlist\") pod 
\"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.081992 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/c1446a26-ae38-40f3-a313-8604f5e98285-cnibin\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.082322 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/b0672e4f-cd9f-47e6-8909-43e33fb9c254-cni-binary-copy\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.082440 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/c1446a26-ae38-40f3-a313-8604f5e98285-cni-binary-copy\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.083496 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/04f943d6-91c5-4493-b310-de0b8ef7966e-proxy-tls\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.113430 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.114597 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2tss9\" (UniqueName: \"kubernetes.io/projected/b0672e4f-cd9f-47e6-8909-43e33fb9c254-kube-api-access-2tss9\") pod \"multus-additional-cni-plugins-fbvf7\" (UID: \"b0672e4f-cd9f-47e6-8909-43e33fb9c254\") " pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.124087 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kv7qq\" (UniqueName: \"kubernetes.io/projected/04f943d6-91c5-4493-b310-de0b8ef7966e-kube-api-access-kv7qq\") pod \"machine-config-daemon-vnmgh\" (UID: \"04f943d6-91c5-4493-b310-de0b8ef7966e\") " pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.127108 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfws4\" (UniqueName: \"kubernetes.io/projected/c1446a26-ae38-40f3-a313-8604f5e98285-kube-api-access-sfws4\") pod \"multus-5vv4l\" (UID: \"c1446a26-ae38-40f3-a313-8604f5e98285\") " pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.157783 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.157817 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.157825 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.157840 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.157850 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network 
not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.180657 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.193994 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-5vv4l" Jan 23 06:54:33 crc kubenswrapper[5102]: W0123 06:54:33.196756 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb0672e4f_cd9f_47e6_8909_43e33fb9c254.slice/crio-8c43038c5d3f1455e28ed44ddf8826a597141731d2522337829853a369f8b9e6 WatchSource:0}: Error finding container 8c43038c5d3f1455e28ed44ddf8826a597141731d2522337829853a369f8b9e6: Status 404 returned error can't find the container with id 8c43038c5d3f1455e28ed44ddf8826a597141731d2522337829853a369f8b9e6 Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.199876 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:54:33 crc kubenswrapper[5102]: W0123 06:54:33.210755 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podc1446a26_ae38_40f3_a313_8604f5e98285.slice/crio-5040a7a590d75ea9cac80e9e3bdd1c8b718afb0152273190c47fb04220d8a460 WatchSource:0}: Error finding container 5040a7a590d75ea9cac80e9e3bdd1c8b718afb0152273190c47fb04220d8a460: Status 404 returned error can't find the container with id 5040a7a590d75ea9cac80e9e3bdd1c8b718afb0152273190c47fb04220d8a460 Jan 23 06:54:33 crc kubenswrapper[5102]: W0123 06:54:33.215756 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod04f943d6_91c5_4493_b310_de0b8ef7966e.slice/crio-87b2dd16e7eea20f636aeb17e615515e6c636d98c9d1eea2431ff4554b35ae36 WatchSource:0}: Error finding container 87b2dd16e7eea20f636aeb17e615515e6c636d98c9d1eea2431ff4554b35ae36: Status 404 returned error can't find the container with id 87b2dd16e7eea20f636aeb17e615515e6c636d98c9d1eea2431ff4554b35ae36 Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.246838 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.259835 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.259864 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.259872 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.259886 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.259895 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.299525 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.312457 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.328356 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.340915 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cgkqt"] Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.341702 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.345047 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.345102 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.345342 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.345705 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.345965 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.346182 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.346299 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.358052 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.362523 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.362632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.362643 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.362662 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.362677 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.372461 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.400474 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.424090 5102 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"c
ri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.438076 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.456672 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.464370 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.464416 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.464428 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.464448 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.464461 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.474390 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481805 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481846 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481870 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481893 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481919 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481953 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 
crc kubenswrapper[5102]: I0123 06:54:33.481978 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.481998 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482022 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482057 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482082 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482106 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482131 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482152 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482173 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482197 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sjfbl\" (UniqueName: \"kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482230 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482285 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482325 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.482366 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.518943 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.534502 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.547938 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.561652 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.576050 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.603627 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.606672 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:33 crc kubenswrapper[5102]: E0123 06:54:33.606803 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.607133 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:33 crc kubenswrapper[5102]: E0123 06:54:33.607202 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.607260 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:33 crc kubenswrapper[5102]: E0123 06:54:33.607320 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608191 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 23:54:50.583745535 +0000 UTC Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608905 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608966 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.608981 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.724108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.724136 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.724145 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.724159 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.724169 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.728182 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\
\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732721 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732801 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732834 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732862 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sjfbl\" (UniqueName: \"kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732913 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732938 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.732967 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733000 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733033 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733058 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733097 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733151 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733181 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733203 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733261 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733282 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: 
\"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733302 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733329 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733353 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733372 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.733443 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734186 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734229 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734280 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734678 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: 
\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734709 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734731 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734752 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734773 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734794 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.734820 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.735973 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.736059 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.736087 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc 
kubenswrapper[5102]: I0123 06:54:33.736111 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.736137 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.736162 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.736584 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.742930 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.758778 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.771046 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.771129 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"87b2dd16e7eea20f636aeb17e615515e6c636d98c9d1eea2431ff4554b35ae36"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.776001 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerStarted","Data":"006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.776071 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerStarted","Data":"5040a7a590d75ea9cac80e9e3bdd1c8b718afb0152273190c47fb04220d8a460"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.778183 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerStarted","Data":"f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.778250 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerStarted","Data":"8c43038c5d3f1455e28ed44ddf8826a597141731d2522337829853a369f8b9e6"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.780583 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pht4g" event={"ID":"7fdff815-7d6c-4f01-946d-bc444475aa15","Type":"ContainerStarted","Data":"2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.780635 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-pht4g" event={"ID":"7fdff815-7d6c-4f01-946d-bc444475aa15","Type":"ContainerStarted","Data":"9be41d277c31287548454f52a77a4147c62518e050880f3dc387da9e2fd7d2cd"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 
06:54:33.789393 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.789870 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sjfbl\" (UniqueName: \"kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl\") pod \"ovnkube-node-cgkqt\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.814662 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.839648 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.839716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.839726 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.839746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.839761 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.840740 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.857302 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.877303 5102 
status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":
0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfb
l\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-d
ev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.891876 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"runn
ing\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.902692 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.915483 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.928641 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.942356 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.954747 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:33Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.993600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.993872 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.993892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.993920 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.993932 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:33Z","lastTransitionTime":"2026-01-23T06:54:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:33 crc kubenswrapper[5102]: I0123 06:54:33.994675 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt"
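
The status_manager.go failures above and below this point all carry their payload inline: a JSON merge patch that klog has quoted and escaped, which makes the records nearly unreadable. A minimal Go sketch for recovering one patch, assuming the escaped string between the outer \" ... \" of an err="failed to patch status ..." value has been cut out of the log; the sample below is a hand-shortened fragment of the multus-5vv4l patch, and if the captured log carries an extra escaping level, strconv.Unquote simply needs a second pass.

package main

import (
	"encoding/json"
	"fmt"
	"strconv"
)

func main() {
	// Hand-shortened sample of an escaped patch, as found between the
	// outer \" ... \" quotes of an err="failed to patch status ..." record.
	escaped := `"{\"metadata\":{\"uid\":\"c1446a26-ae38-40f3-a313-8604f5e98285\"},\"status\":{\"phase\":\"Running\"}}"`

	// Strip the one level of quoting that klog applied to the patch string.
	patch, err := strconv.Unquote(escaped)
	if err != nil {
		panic(err)
	}

	// Re-indent the recovered merge patch so the status diff is readable.
	var doc map[string]any
	if err := json.Unmarshal([]byte(patch), &doc); err != nil {
		panic(err)
	}
	pretty, _ := json.MarshalIndent(doc, "", "  ")
	fmt.Println(string(pretty))
}
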
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.016505 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d77325745326
5a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",
\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"h
ostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.060584 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\
\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.109333 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.112177 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.112199 5102 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.112207 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.112222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.112233 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.123255 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\"
:\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.135678 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.149570 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.162158 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.176227 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCou
nt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.189808 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.199373 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:34Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.215029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.215067 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.215076 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.215095 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.215105 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
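
Every patch failure in this excerpt shares one root cause: the pod.network-node-identity.openshift.io webhook on 127.0.0.1:9743 presents a serving certificate that expired at 2025-08-24T17:21:41Z, while the node clock reads 2026-01-23. A small diagnostic sketch, assuming it is run on the node itself, that repeats the validity-window check behind the recurring x509 error; it skips chain verification precisely so the expired leaf can still be inspected.

package main

import (
	"crypto/tls"
	"fmt"
	"time"
)

func main() {
	// Webhook endpoint taken from the log; only reachable on the node.
	addr := "127.0.0.1:9743"

	// Normal verification would abort the handshake on an expired cert,
	// so skip it and perform the window check by hand instead.
	conn, err := tls.Dial("tcp", addr, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		fmt.Println("dial failed:", err)
		return
	}
	defer conn.Close()

	cert := conn.ConnectionState().PeerCertificates[0]
	fmt.Println("subject:  ", cert.Subject)
	fmt.Println("notBefore:", cert.NotBefore)
	fmt.Println("notAfter: ", cert.NotAfter)

	// The same comparison that produces "current time ... is after ...".
	now := time.Now()
	switch {
	case now.After(cert.NotAfter):
		fmt.Println("certificate has expired")
	case now.Before(cert.NotBefore):
		fmt.Println("certificate is not yet valid")
	default:
		fmt.Println("certificate is within its validity window")
	}
}
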
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.317869 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.317942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.317960 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.317984 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.318000 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.445621 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.445653 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.445664 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.445680 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.445690 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.556613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.556667 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.556676 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.556695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.556705 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
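
Interleaved with the webhook errors, the kubelet keeps republishing Ready=False because the container runtime reports no CNI configuration in /etc/kubernetes/cni/net.d/; the network plugin pods earlier in this excerpt are still initializing, so nothing has written a config there yet. A rough Go approximation of the directory probe behind that message; the authoritative check happens in the runtime through libcni, which looks for .conf, .conflist and .json files, so treat this as an illustration rather than the exact CRI-O logic.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// Directory named in the repeated KubeletNotReady message.
	confDir := "/etc/kubernetes/cni/net.d"

	entries, err := os.ReadDir(confDir)
	if err != nil {
		fmt.Println("cannot read CNI conf dir:", err)
		return
	}

	// libcni accepts these extensions; until one such file appears the
	// runtime keeps reporting NetworkReady=false.
	found := 0
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			fmt.Println("found CNI config:", filepath.Join(confDir, e.Name()))
			found++
		}
	}
	if found == 0 {
		fmt.Println("no CNI configuration file in", confDir)
	}
}
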
Has your network provider started?"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.608987 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-14 21:25:09.512291952 +0000 UTC Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.659554 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.659602 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.659614 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.659633 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.659645 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.893100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.893333 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.893341 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.893357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.893368 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:34Z","lastTransitionTime":"2026-01-23T06:54:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.896885 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535" exitCode=0 Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.896945 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.900157 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733" exitCode=0 Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.900198 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.900219 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"d5fa8289023fded4f70c31a498908d8405775caf992283a000ad815b29bbe14f"} Jan 23 06:54:34 crc kubenswrapper[5102]: I0123 06:54:34.903808 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.021280 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.021308 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.021316 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.021328 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.021340 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.139571 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.158393 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.158464 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.158488 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.158526 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.158572 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.248588 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.264008 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.264043 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.264050 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.264065 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.264074 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.296961 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.357589 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.367556 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.367597 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.367607 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.367632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.367641 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.436578 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reaso
n\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc 
kubenswrapper[5102]: I0123 06:54:35.460251 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.460392 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460468 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:43.460449583 +0000 UTC m=+34.280798568 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.460500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.460528 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.460562 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460561 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460636 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460646 5102 nestedpendingoperations.go:348] 
Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:43.460627539 +0000 UTC m=+34.280976514 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460668 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:43.46066165 +0000 UTC m=+34.281010625 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460746 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460760 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460770 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460790 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:43.460784363 +0000 UTC m=+34.281133338 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460802 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460861 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460878 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.460981 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:43.460950489 +0000 UTC m=+34.281299654 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.463828 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.478491 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.507086 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.507130 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.507142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.507166 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.507178 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.551599 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.597165 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.597931 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.598034 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.598110 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.598182 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:35 crc kubenswrapper[5102]: E0123 06:54:35.598277 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.609861 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.609908 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.609917 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.609931 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.609946 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.610261 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 06:35:45.468452786 +0000 UTC Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.644263 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.712193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.712243 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.712254 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.712274 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.712316 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.780147 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.807807 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with incomplete status: 
[kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d7732574532
65a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.814741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.814797 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.814808 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.814825 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.814837 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.825170 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-releas
e-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.837293 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.853092 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.869616 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready 
status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-de
v/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.889080 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.i
o/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.900434 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.914113 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.925904 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.941937 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.970306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.970344 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.970357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.970374 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.970385 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:35Z","lastTransitionTime":"2026-01-23T06:54:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:35 crc kubenswrapper[5102]: I0123 06:54:35.988265 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c
7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:35Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.010561 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.026609 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 
2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.042577 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/k
ubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.193858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.193909 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.193918 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.193935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.193951 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:36Z","lastTransitionTime":"2026-01-23T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.194892 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.227456 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.298405 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.298879 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.298892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.298912 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.298925 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:36Z","lastTransitionTime":"2026-01-23T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.401251 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.401292 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.401301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.401315 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.401325 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:36Z","lastTransitionTime":"2026-01-23T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.527354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.527384 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.527394 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.527408 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.527416 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:36Z","lastTransitionTime":"2026-01-23T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.574377 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-bmrp4"] Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.574735 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.576346 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.576823 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.577243 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.577363 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.589700 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/be881145-20bb-48fc-901b-d854e7bf15c5-serviceca\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.589760 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be881145-20bb-48fc-901b-d854e7bf15c5-host\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.589823 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xvkpn\" (UniqueName: \"kubernetes.io/projected/be881145-20bb-48fc-901b-d854e7bf15c5-kube-api-access-xvkpn\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.607384 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.610338 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 12:08:49.84283318 +0000 UTC Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.623818 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.629692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.629716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.629726 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.629744 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.629757 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:36Z","lastTransitionTime":"2026-01-23T06:54:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.638266 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.651661 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4
.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.682736 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.690276 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xvkpn\" (UniqueName: \"kubernetes.io/projected/be881145-20bb-48fc-901b-d854e7bf15c5-kube-api-access-xvkpn\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.690310 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be881145-20bb-48fc-901b-d854e7bf15c5-host\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.690372 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" 
(UniqueName: \"kubernetes.io/configmap/be881145-20bb-48fc-901b-d854e7bf15c5-serviceca\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.691303 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/be881145-20bb-48fc-901b-d854e7bf15c5-serviceca\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.691563 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be881145-20bb-48fc-901b-d854e7bf15c5-host\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.736120 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.738288 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xvkpn\" (UniqueName: \"kubernetes.io/projected/be881145-20bb-48fc-901b-d854e7bf15c5-kube-api-access-xvkpn\") pod \"node-ca-bmrp4\" (UID: \"be881145-20bb-48fc-901b-d854e7bf15c5\") " pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.752963 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\
\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.769215 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.800747 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.892603 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.905229 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.916188 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.926394 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:36 crc kubenswrapper[5102]: I0123 06:54:36.938324 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:36Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.033198 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-bmrp4" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.035656 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.035704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.035716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.035732 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.035742 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.065826 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerStarted","Data":"9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.079254 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.079298 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.079311 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.079325 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.090726 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.108497 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.128562 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.138061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.138105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.138126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.138151 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.138169 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.150066 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets
/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entry
point\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.165030 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.272255 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.281166 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.281212 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.281222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.281241 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.281254 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.297758 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.313908 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.329253 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.355173 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"}
,{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\
\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.370591 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.389313 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.389366 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: 
I0123 06:54:37.389379 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.389399 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.389414 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.400614 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-
o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.411576 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.426007 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:37Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.494561 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.494612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.494621 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.494640 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.494652 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.597229 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.597229 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:37 crc kubenswrapper[5102]: E0123 06:54:37.597367 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:37 crc kubenswrapper[5102]: E0123 06:54:37.597402 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.597239 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:37 crc kubenswrapper[5102]: E0123 06:54:37.597459 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.604921 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.604970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.604981 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.604998 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.605010 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.612936 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-15 08:19:06.030192956 +0000 UTC Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.707493 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.707534 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.707559 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.707576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.707588 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.809749 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.809786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.809795 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.809809 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.809819 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.912147 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.912185 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.912196 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.912209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:37 crc kubenswrapper[5102]: I0123 06:54:37.912220 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:37Z","lastTransitionTime":"2026-01-23T06:54:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.014595 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.014624 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.014633 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.014648 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.014656 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.084530 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b" exitCode=0 Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.084598 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.088512 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.088563 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.092209 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bmrp4" event={"ID":"be881145-20bb-48fc-901b-d854e7bf15c5","Type":"ContainerStarted","Data":"52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.092241 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-bmrp4" event={"ID":"be881145-20bb-48fc-901b-d854e7bf15c5","Type":"ContainerStarted","Data":"f16009004f0a2f3fc688717424c2c1d9686bd1b3c99bbb94ecebc3a4847457bb"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.111337 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.117894 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.117925 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.117938 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.117956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.117971 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.135945 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\
\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.154024 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.177473 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.195339 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.212471 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.221975 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.222029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.222046 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.222071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.222087 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.230418 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.249242 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.264922 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.279786 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.292251 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.305688 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.316269 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.324814 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.324834 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.324842 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.324855 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.324864 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.327419 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.341937 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.355270 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.376397 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.386483 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.398958 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.412336 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.424683 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.427506 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.427548 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.427557 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.427571 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.427580 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.442990 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.458186 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.473691 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.497557 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-
23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.517096 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.531672 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.531739 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.531758 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.531781 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.531801 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.534071 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.548369 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:38Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.613246 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 17:31:02.028254363 +0000 UTC Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.635832 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.635888 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.635910 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.635937 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.635955 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.739627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.739687 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.739709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.739741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.739769 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.843360 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.843420 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.843440 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.843463 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.843479 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.946478 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.946584 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.946604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.946632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:38 crc kubenswrapper[5102]: I0123 06:54:38.946651 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:38Z","lastTransitionTime":"2026-01-23T06:54:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.050468 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.050534 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.050622 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.050663 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.050687 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.098810 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501" exitCode=0 Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.098882 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.122565 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.141269 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.154623 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.154666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.154701 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.154719 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.154732 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.159721 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.179284 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.200904 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.213240 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.225845 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.237188 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.249057 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504
f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258434 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":
[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258559 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258595 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258606 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258623 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.258664 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.268827 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.277949 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.294209 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/open
shift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev
@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.306423 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.365635 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.365680 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.366036 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.366122 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.366133 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.446483 5102 transport.go:147] "Certificate rotation detected, shutting down client connections to start using new credentials" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.480023 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.480092 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.480116 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.480148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.480170 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.583399 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.583704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.584012 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.584192 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.584324 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.597229 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.597234 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.597276 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:39 crc kubenswrapper[5102]: E0123 06:54:39.597887 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:39 crc kubenswrapper[5102]: E0123 06:54:39.598009 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:39 crc kubenswrapper[5102]: E0123 06:54:39.598148 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.612516 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.614497 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 10:03:19.696762422 +0000 UTC Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.632109 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.650082 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.663668 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.684243 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 
2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.686926 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.686970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.686980 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.686997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.687010 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.705218 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.717656 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.727570 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.741843 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.754648 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504
f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.768929 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.785764 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.789679 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.789716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.789727 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.789743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.789753 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.804101 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.819816 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",
\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.892360 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.892409 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.892421 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.892438 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.892449 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.995754 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.995791 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.995802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.995818 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:39 crc kubenswrapper[5102]: I0123 06:54:39.995829 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:39Z","lastTransitionTime":"2026-01-23T06:54:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.099298 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.099343 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.099360 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.099382 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.099399 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.107226 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7" exitCode=0 Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.107336 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.118330 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.129012 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.149507 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.170672 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504
f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.188008 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.203610 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.203658 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.203669 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.203686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.203700 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.204600 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.221149 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.238208 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/
ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\"
:\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.253380 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.265389 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.283713 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.297849 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.307591 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.307638 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.307654 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.307672 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.307684 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.315772 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.335556 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.345209 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.410367 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.410422 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.410440 5102 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.410463 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.410479 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.514124 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.514405 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.514620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.514792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.515104 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.580007 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.580264 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.580330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.580400 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.580462 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.601262 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.605558 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.605674 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.605733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.605833 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.605923 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.614961 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 09:57:22.644726029 +0000 UTC Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.627308 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.631617 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.631844 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.631996 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.632165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.632368 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.656591 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.661256 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.661599 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.661759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.661919 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.662129 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.683018 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.687630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.687849 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.687989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.688148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.688274 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.707276 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:40Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:40Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:40 crc kubenswrapper[5102]: E0123 06:54:40.707929 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.709741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.709788 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.709802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.709821 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.709834 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.812228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.812470 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.812627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.812786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.812908 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.916248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.916650 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.916742 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.916835 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:40 crc kubenswrapper[5102]: I0123 06:54:40.916924 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:40Z","lastTransitionTime":"2026-01-23T06:54:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.020011 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.020059 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.020076 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.020100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.020118 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.122050 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.122230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.122256 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.122313 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.122333 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.125660 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.125625 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98" exitCode=0 Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.143131 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.157274 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.173438 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.197692 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.216287 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.225748 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.225813 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.225829 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.225854 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.225876 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.229311 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.245314 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://186
8eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.257825 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.271779 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.286875 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.301405 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.321197 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 
2025-08-24T17:21:41Z"
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.328156 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.328199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.328210 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.328228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.328239 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.352389 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z 
is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.367334 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:41Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.431661 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.431718 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.431731 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.431762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.431778 5102 setters.go:603] "Node became not ready" 
node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.535143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.535227 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.535249 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.535281 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.535304 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.598001 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.598053 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.598007 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:41 crc kubenswrapper[5102]: E0123 06:54:41.598130 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:41 crc kubenswrapper[5102]: E0123 06:54:41.598267 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:41 crc kubenswrapper[5102]: E0123 06:54:41.598455 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.617392 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-05 21:53:54.231981126 +0000 UTC Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.637632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.637673 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.637686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.637704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.637716 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.741405 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.741457 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.741596 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.741618 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.741629 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.845385 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.845502 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.845527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.845580 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.845600 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.948085 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.948164 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.948186 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.948216 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:41 crc kubenswrapper[5102]: I0123 06:54:41.948238 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:41Z","lastTransitionTime":"2026-01-23T06:54:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.051857 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.051898 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.051908 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.051927 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.051938 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.132517 5102 generic.go:334] "Generic (PLEG): container finished" podID="b0672e4f-cd9f-47e6-8909-43e33fb9c254" containerID="1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04" exitCode=0 Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.132600 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerDied","Data":"1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.139651 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.140218 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.153754 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.155833 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.155905 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.155925 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.155953 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.155972 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.172425 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.191025 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.209966 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 
2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.211753 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.226977 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.246395 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller 
ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"}
,{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\
\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259371 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259581 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259619 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 
06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259647 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.259660 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.275370 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\
"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.288944 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.306674 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.350158 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.361986 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.362019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.362031 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.362048 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.362059 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.364413 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.381302 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.394981 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.411691 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.422481 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.433302 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504
f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.445415 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.458090 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.463887 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.463924 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.463936 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.463956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.463978 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.469026 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.486689 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://186
8eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.500514 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.514698 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.528685 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.544341 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2
099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.560488 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 
2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.566910 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.566948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.566959 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.566977 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.566989 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.587757 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d56
7fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.599162 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:42Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.618458 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-31 05:57:40.24801674 +0000 UTC Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.669709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.669740 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.669748 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.669761 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.669770 5102 setters.go:603] "Node 
became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.772508 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.772581 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.772591 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.772607 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.772617 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.876093 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.876776 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.876808 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.876845 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.876870 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.980903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.980947 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.980961 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.980978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:42 crc kubenswrapper[5102]: I0123 06:54:42.980991 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:42Z","lastTransitionTime":"2026-01-23T06:54:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.083666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.083742 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.083760 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.083786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.083804 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.156241 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.157750 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" event={"ID":"b0672e4f-cd9f-47e6-8909-43e33fb9c254","Type":"ContainerStarted","Data":"5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.157844 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.185124 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\
\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\
"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\
\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.187205 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.187255 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.187274 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.187299 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.187316 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.203427 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.220716 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.239283 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.255736 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.262878 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.272217 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.289978 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.290195 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.290209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.290217 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.290228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.290239 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.304146 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.322844 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.344204 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.362414 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d56
7fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.378517 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.392825 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.392876 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.392887 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.392905 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.392916 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.395514 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.409367 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.425964 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.441186 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.459367 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.474439 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.491687 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.530237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.530291 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.530305 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.530322 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.530332 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.531363 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.542129 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.542276 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.542327 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542372 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:54:59.542341264 +0000 UTC m=+50.362690249 (durationBeforeRetry 16s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.542434 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542478 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542498 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.542502 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542511 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542561 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:59.542552861 +0000 UTC m=+50.362901826 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542625 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542703 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2026-01-23 06:54:59.542678845 +0000 UTC m=+50.363027850 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542743 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542770 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542813 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542831 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542783 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:59.542771918 +0000 UTC m=+50.363120933 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.542926 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:59.542899502 +0000 UTC m=+50.363248497 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.545364 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.562574 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d56
7fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.573396 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.587191 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.597102 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.597113 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.597292 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.597132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.597343 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:43 crc kubenswrapper[5102]: E0123 06:54:43.597462 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.600524 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.612488 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.618773 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 09:27:30.539722544 +0000 UTC Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.629507 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.633863 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.633924 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.633948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.633976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.634077 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.643839 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:43Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.737402 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.737444 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.737456 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.737473 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.737484 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.840410 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.840467 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.840519 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.840596 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.840688 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.944237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.944324 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.944350 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.944387 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:43 crc kubenswrapper[5102]: I0123 06:54:43.944412 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:43Z","lastTransitionTime":"2026-01-23T06:54:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.047339 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.047383 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.047396 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.047412 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.047426 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.149915 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.149988 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.150013 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.150042 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.150065 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.159674 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.252900 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.252955 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.252973 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.252997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.253015 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.355867 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.355929 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.355943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.355967 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.355983 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.464792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.464861 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.464884 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.464915 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.464937 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.567739 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.567804 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.567815 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.567835 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.567847 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.619273 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-09 02:04:10.028939919 +0000 UTC Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.670480 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.670575 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.670599 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.670630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.670653 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.782454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.782536 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.782590 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.782620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.782642 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.885631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.885935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.886017 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.886106 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.886192 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.988858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.988914 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.988933 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.988956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:44 crc kubenswrapper[5102]: I0123 06:54:44.988975 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:44Z","lastTransitionTime":"2026-01-23T06:54:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.092414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.092471 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.092489 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.092513 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.092530 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.163036 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.213940 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.214213 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.214230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.214246 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.214259 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.317110 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.317424 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.317441 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.317463 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.317480 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.419824 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.419878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.419915 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.419941 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.419959 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.522677 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.522744 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.522762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.522789 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.522806 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.598014 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.598184 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.598375 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:45 crc kubenswrapper[5102]: E0123 06:54:45.598582 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:45 crc kubenswrapper[5102]: E0123 06:54:45.598857 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:45 crc kubenswrapper[5102]: E0123 06:54:45.598964 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.620347 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 09:51:47.022820432 +0000 UTC Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.625873 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.625930 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.625948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.625976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.625994 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.728803 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.728852 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.728868 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.728892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.728910 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.832217 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.832299 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.832324 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.832356 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.832378 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.936334 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.936494 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.936520 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.936576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:45 crc kubenswrapper[5102]: I0123 06:54:45.936602 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:45Z","lastTransitionTime":"2026-01-23T06:54:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.039576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.039872 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.039999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.040156 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.040277 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.142590 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.142636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.142645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.142660 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.142671 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.245664 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.245735 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.245759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.245792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.245814 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.348721 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.348775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.348792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.348815 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.348834 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.451989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.452043 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.452052 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.452069 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.452080 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.555085 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.555143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.555162 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.555187 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.555253 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.579656 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2"] Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.580166 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.592670 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.592770 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.614645 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.620491 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 09:38:09.926075347 +0000 UTC Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.637257 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658319 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658467 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658516 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658560 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658585 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.658617 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.679734 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.705101 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.736423 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d56
7fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.741895 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.742513 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.742581 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.742833 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bx5xl\" (UniqueName: \"kubernetes.io/projected/aa980860-3ea9-4b1f-ae8f-d9caed98900d-kube-api-access-bx5xl\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.743078 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.748649 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.762976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.763060 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.763080 5102 kubelet_node_status.go:724] "Recording event message for 
node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.763159 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.763189 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.767559 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\
\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.781315 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.794094 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.811389 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\
"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.824986 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.839024 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.844623 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.844678 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.844754 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bx5xl\" (UniqueName: \"kubernetes.io/projected/aa980860-3ea9-4b1f-ae8f-d9caed98900d-kube-api-access-bx5xl\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.844793 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.845711 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-env-overrides\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.846115 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.852086 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/aa980860-3ea9-4b1f-ae8f-d9caed98900d-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.866244 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.866311 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.866331 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.866358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.866380 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.868462 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.878281 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-bx5xl\" (UniqueName: \"kubernetes.io/projected/aa980860-3ea9-4b1f-ae8f-d9caed98900d-kube-api-access-bx5xl\") pod \"ovnkube-control-plane-749d76644c-sndn2\" (UID: \"aa980860-3ea9-4b1f-ae8f-d9caed98900d\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.888598 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.909148 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.913997 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.920143 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: W0123 06:54:46.928108 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaa980860_3ea9_4b1f_ae8f_d9caed98900d.slice/crio-234bba57ae26f1fe689151ffbc27c7fa8b1d0f6c9cc22e2ad91acdf9ea7d42dd WatchSource:0}: Error finding container 234bba57ae26f1fe689151ffbc27c7fa8b1d0f6c9cc22e2ad91acdf9ea7d42dd: Status 404 returned error can't find the container with id 234bba57ae26f1fe689151ffbc27c7fa8b1d0f6c9cc22e2ad91acdf9ea7d42dd Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.939642 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.970079 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6
419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.972434 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.972527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.972583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.972618 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.972643 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:46Z","lastTransitionTime":"2026-01-23T06:54:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:46 crc kubenswrapper[5102]: I0123 06:54:46.985531 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.002047 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:46Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.025690 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.042212 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.063641 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.075561 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.075597 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.075610 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.075631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.075642 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.085261 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.106698 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.126474 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 
2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.142126 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.172403 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.173097 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/0.log" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.177727 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.177752 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.177762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.177776 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.177786 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.178140 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82" exitCode=1 Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.178207 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.178861 5102 scope.go:117] "RemoveContainer" containerID="261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.180399 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" event={"ID":"aa980860-3ea9-4b1f-ae8f-d9caed98900d","Type":"ContainerStarted","Data":"234bba57ae26f1fe689151ffbc27c7fa8b1d0f6c9cc22e2ad91acdf9ea7d42dd"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.189579 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal 
error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.210193 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod 
\"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.234090 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastStat
e\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.255568 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.270719 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.279826 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.280218 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.280242 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.280270 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.280289 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.290710 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.307415 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.329328 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.349063 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.364387 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.375119 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.382565 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.382715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.382900 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.383110 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.383217 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.395484 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.408131 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\
\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.427125 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a57
8bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.439241 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.453630 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.486132 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.486167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.486174 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.486188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.486198 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.588891 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.588963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.588976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.588995 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.589011 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.597385 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.597492 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.597699 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.597736 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.597813 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.597905 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.621116 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 17:56:36.393223686 +0000 UTC Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.694592 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-rmkhl"] Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.695257 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.695348 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.710661 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.723475 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.739689 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d56
7fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.752973 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\
\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.777280 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.794081 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.805136 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.817311 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.832660 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6
419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.840490 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.840558 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.840573 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.840596 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.840608 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.848641 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.854676 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bfb85\" (UniqueName: \"kubernetes.io/projected/a7d383f6-0729-4590-8252-46e50ea8ece8-kube-api-access-bfb85\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.854751 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.869998 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.892361 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.916195 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.935312 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.943113 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.943177 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.943189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.943208 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.943236 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:47Z","lastTransitionTime":"2026-01-23T06:54:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.949263 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.956751 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.956865 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bfb85\" (UniqueName: \"kubernetes.io/projected/a7d383f6-0729-4590-8252-46e50ea8ece8-kube-api-access-bfb85\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.957250 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object 
"openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:47 crc kubenswrapper[5102]: E0123 06:54:47.957378 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:48.457340842 +0000 UTC m=+39.277689857 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.989219 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\
\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:47Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:47 crc kubenswrapper[5102]: I0123 06:54:47.997667 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bfb85\" (UniqueName: \"kubernetes.io/projected/a7d383f6-0729-4590-8252-46e50ea8ece8-kube-api-access-bfb85\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.087814 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.087864 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.087880 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.087903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.087920 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.191432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.191523 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.191560 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.191586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.191601 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.194416 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" event={"ID":"aa980860-3ea9-4b1f-ae8f-d9caed98900d","Type":"ContainerStarted","Data":"10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.194498 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" event={"ID":"aa980860-3ea9-4b1f-ae8f-d9caed98900d","Type":"ContainerStarted","Data":"b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.200866 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/0.log" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.210067 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.210519 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.214274 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.226854 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.239014 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.256685 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.268149 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 
06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.278369 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.288719 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.294345 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.294502 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.294612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.294689 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.294749 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.298934 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.309006 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.319667 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.338723 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36
cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.350856 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\
\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.365353 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.379969 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.392860 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.397358 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.397415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.397428 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.397445 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.397456 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.408062 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\
\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.421422 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.435032 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.450732 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.462427 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:48 crc kubenswrapper[5102]: E0123 06:54:48.462653 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:48 crc kubenswrapper[5102]: E0123 06:54:48.462731 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:49.462712175 +0000 UTC m=+40.283061171 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.468078 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 
2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.483295 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.499435 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.499476 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.499484 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.499499 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.499510 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.501226 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\
\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.512485 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.522436 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.534523 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.549111 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.584362 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.600601 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6
355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.601944 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.601997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.602014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.602034 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.602049 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.615016 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.621521 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 18:15:31.134883116 +0000 UTC Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.628224 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.646438 5102 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c
857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-
release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.663222 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:48Z is after 2025-08-24T17:21:41Z" Jan 23 
06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.687524 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.704157 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.704406 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.704477 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.704626 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.704717 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.808587 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.808811 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.808875 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.808965 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.809045 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.912142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.912219 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.912239 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.912267 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:48 crc kubenswrapper[5102]: I0123 06:54:48.912283 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:48Z","lastTransitionTime":"2026-01-23T06:54:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.016660 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.016721 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.016736 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.016757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.016773 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.119732 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.119794 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.119816 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.119842 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.119895 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.217583 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/1.log" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.218917 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/0.log" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.222222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.222287 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.222309 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.222339 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.222361 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.223301 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60" exitCode=1 Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.223358 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.223426 5102 scope.go:117] "RemoveContainer" containerID="261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.224736 5102 scope.go:117] "RemoveContainer" containerID="5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.225189 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.250327 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.266082 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 
06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.284995 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.308349 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.325879 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.325955 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.325974 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.325997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.326014 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.327890 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.345262 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.363093 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.378316 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.394158 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.410791 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.429017 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.429609 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.429899 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.430007 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.430102 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.430805 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.448888 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.477311 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d
33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address 
\\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\
\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.489199 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.489429 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.489596 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:51.489526257 +0000 UTC m=+42.309875432 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.498447 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\
\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.512959 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.532466 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.532527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.532586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.532616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.532634 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.534930 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.597817 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.597909 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.598022 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.598122 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.598221 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.598272 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.598381 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:49 crc kubenswrapper[5102]: E0123 06:54:49.598432 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.619896 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.622207 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate 
expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 19:44:18.583917454 +0000 UTC Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.635068 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.635099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.635108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.635123 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.635134 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.644640 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet 
valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.671059 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"
Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa4
1ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://261809bdbba9bde5a57bf241f10209d830763d567fae74332a6b69bd5cc10e82\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"message\\\":\\\".Node (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.511993 6371 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:54:46.512057 6371 reflector.go:311] Stopping reflector *v1.EgressFirewall (0s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressfirewall/v1/apis/informers/externalversions/factory.go:140\\\\nI0123 06:54:46.512877 6371 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:54:46.512924 6371 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:54:46.512965 6371 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:54:46.513016 6371 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:54:46.513035 6371 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:54:46.513034 6371 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:54:46.513012 6371 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:54:46.513065 6371 handler.go:208] Removed *v1.Node event handler 2\\\\nI0123 06:54:46.513102 6371 factory.go:656] Stopping watch factory\\\\nI0123 06:54:46.513110 6371 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:54:46.513127 6371 ovnkube.go:599] Stopped 
ovnkube\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d
1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.702579 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.16
8.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.719803 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.733678 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.739410 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.739479 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.739499 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.739527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.739571 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.746004 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.762928 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.784847 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6
419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.807162 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.826133 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.843078 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.843137 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.843158 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.843186 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.843205 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.851580 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.871764 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.893990 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.914721 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.935751 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"syste
m-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.946304 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.946370 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.946384 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.946411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:49 crc kubenswrapper[5102]: I0123 06:54:49.946426 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:49Z","lastTransitionTime":"2026-01-23T06:54:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.049044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.049107 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.049124 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.049149 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.049167 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.151862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.151922 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.151939 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.151965 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.151984 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.231122 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/1.log" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.238975 5102 scope.go:117] "RemoveContainer" containerID="5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60" Jan 23 06:54:50 crc kubenswrapper[5102]: E0123 06:54:50.239321 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.255642 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.255698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.255717 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.255741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.255760 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.273489 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.291453 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.308634 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.330516 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.353632 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.359800 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.359901 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.359921 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.359945 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.360001 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration 
file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.375782 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.395323 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.413067 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.430713 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.454343 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.463647 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.463693 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc 
kubenswrapper[5102]: I0123 06:54:50.463704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.463721 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.463734 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.470800 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:5
4:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.486246 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"star
ted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.500102 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.517614 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.535145 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.552109 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.566934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.566990 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.567006 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.567029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.567045 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.623048 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-06 09:52:46.502125571 +0000 UTC
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.670449 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.670531 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.670601 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.670633 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.670664 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.773927 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.773981 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.773992 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.774009 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.774024 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.877586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.877661 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.877685 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.877777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.877808 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.925105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.925211 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.925228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.925248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.925263 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:54:50 crc kubenswrapper[5102]: E0123 06:54:50.944611 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/.
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.949809 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.949878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.949906 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.949931 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.949951 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: E0123 06:54:50.969521 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.974782 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.974846 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.974865 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.974889 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:50 crc kubenswrapper[5102]: I0123 06:54:50.974907 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:50Z","lastTransitionTime":"2026-01-23T06:54:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:50 crc kubenswrapper[5102]: E0123 06:54:50.995272 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:50Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:50Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.000852 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.000899 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.000916 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.000939 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.000955 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.021975 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:51Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.027301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.027352 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.027369 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.027391 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.027406 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.047821 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:51Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:51Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.048064 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.050340 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
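
Everything above reduces to a single failure: the kubelet cannot patch its node status because the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24, while the node clock reads 2026-01-23, so after the configured retries the update is abandoned ("update node status exceeds retry count"). A minimal sketch for confirming the certificate window from the node itself; the script is illustrative only and assumes Python with the third-party cryptography package (version 42 or newer for the *_utc accessors) and that the endpoint completes a handshake without a client certificate:

# check_webhook_cert.py: fetch the webhook's serving certificate and
# compare its validity window with the current time.
import datetime
import ssl

from cryptography import x509

HOST, PORT = "127.0.0.1", 9743  # endpoint taken from the kubelet error above

# ssl.get_server_certificate() performs no chain verification, which is the
# point here: we want to inspect a certificate that is already expired.
pem = ssl.get_server_certificate((HOST, PORT))
cert = x509.load_pem_x509_certificate(pem.encode())

now = datetime.datetime.now(datetime.timezone.utc)
print("subject:   ", cert.subject.rfc4514_string())
print("not before:", cert.not_valid_before_utc)
print("not after: ", cert.not_valid_after_utc)
print("expired:   ", now > cert.not_valid_after_utc)

Until the webhook presents a valid certificate, no amount of kubelet retrying will get the status patch through; the repeated event blocks below are the visible symptom of that loop.
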
event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.050414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.050431 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.050458 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.050484 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.153494 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.153612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.153638 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.153666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.153685 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.256848 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.256916 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.256933 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.256958 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.256976 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.360651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.360704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.360721 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.360746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.360762 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.462975 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.463029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.463054 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.463081 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.463100 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.512763 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.513025 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.513152 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:54:55.513122959 +0000 UTC m=+46.333471964 (durationBeforeRetry 4s). 
[... node-status sequence repeats at 06:54:51.566 ...]
Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.597649 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.597773 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.597874 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.597896 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.597991 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.598129 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.598259 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:54:51 crc kubenswrapper[5102]: E0123 06:54:51.598386 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.623478 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-09 15:20:50.812050519 +0000 UTC
Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.774263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.774324 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.774340 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.774364 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.774384 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.878018 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.878085 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.878107 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.878140 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.878162 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.981943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.982323 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.982472 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.982657 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:51 crc kubenswrapper[5102]: I0123 06:54:51.982818 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:51Z","lastTransitionTime":"2026-01-23T06:54:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.086321 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.086423 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.086442 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.086500 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.086518 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.190467 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.190819 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.190989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.191151 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.191270 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.294942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.294988 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.295005 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.295029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.295045 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.398167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.398241 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.398260 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.398282 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.398299 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.502397 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.502451 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.502478 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.502522 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.502579 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.605457 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.605533 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.605582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.605612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.605631 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.624797 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-09 07:34:45.00176663 +0000 UTC Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.707752 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.707806 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.707822 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.707847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.707865 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.811189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.811270 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.811293 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.811325 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.811349 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.914637 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.915186 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.915360 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.915529 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:52 crc kubenswrapper[5102]: I0123 06:54:52.915777 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:52Z","lastTransitionTime":"2026-01-23T06:54:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.019882 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.019959 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.019976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.020047 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.020069 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.123402 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.123462 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.123479 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.123502 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.123520 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.226625 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.226690 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.226708 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.226735 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.226757 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.330084 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.330137 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.330149 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.330167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.330179 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.435236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.435318 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.435343 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.435370 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.435388 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.538361 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.538420 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.538436 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.538459 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.538477 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.597071 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.597114 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.597162 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:53 crc kubenswrapper[5102]: E0123 06:54:53.597267 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.597315 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:53 crc kubenswrapper[5102]: E0123 06:54:53.597355 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:53 crc kubenswrapper[5102]: E0123 06:54:53.597440 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:53 crc kubenswrapper[5102]: E0123 06:54:53.597533 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.625582 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 04:16:09.903804391 +0000 UTC Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.641457 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.641503 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.641522 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.641575 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.641593 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.744587 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.744662 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.744689 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.744719 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.744740 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.847854 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.847900 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.847911 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.847928 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.847941 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.950865 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.950933 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.950957 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.950987 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:53 crc kubenswrapper[5102]: I0123 06:54:53.951005 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:53Z","lastTransitionTime":"2026-01-23T06:54:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.053820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.053892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.053913 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.053945 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.053969 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.157405 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.157468 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.157485 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.157514 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.157531 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.260647 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.260710 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.260725 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.260755 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.260770 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.364890 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.364977 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.364999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.365033 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.365055 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.467878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.467950 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.467967 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.467993 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.468010 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.570935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.570983 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.570993 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.571008 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.571018 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.627163 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 22:05:12.221322459 +0000 UTC Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.674094 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.674152 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.674165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.674183 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.674194 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.777411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.777461 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.777470 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.777487 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.777498 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.880126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.880273 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.880301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.880347 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.880373 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.984020 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.984071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.984082 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.984101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:54 crc kubenswrapper[5102]: I0123 06:54:54.984111 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:54Z","lastTransitionTime":"2026-01-23T06:54:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.086816 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.086889 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.086911 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.086940 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.086961 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.191897 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.191994 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.192015 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.192044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.192065 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.295141 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.295188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.295201 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.295218 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.295229 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.399240 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.399311 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.399322 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.399341 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.399354 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.502609 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.502658 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.502675 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.502697 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.502716 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.565055 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.565254 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.565329 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:03.565310834 +0000 UTC m=+54.385659819 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.597684 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.597737 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.597810 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.597888 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.598034 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.598412 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.598649 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:55 crc kubenswrapper[5102]: E0123 06:54:55.598865 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.607780 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.607878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.607903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.607927 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.607990 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.627717 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-01 06:41:57.73681221 +0000 UTC Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.710866 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.710943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.710956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.710980 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.710992 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.814380 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.814432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.814454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.814480 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.814497 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.923023 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.923105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.923125 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.923149 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:55 crc kubenswrapper[5102]: I0123 06:54:55.923166 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:55Z","lastTransitionTime":"2026-01-23T06:54:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.026838 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.026907 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.026922 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.026945 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.026960 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.130485 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.130571 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.130585 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.130607 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.130622 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.234001 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.234090 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.234111 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.234142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.234164 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.338090 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.338170 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.338193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.338222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.338247 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.442214 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.442300 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.442320 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.442348 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.442366 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.545618 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.545689 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.545712 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.545741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.545762 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.628548 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-01 19:30:25.762109278 +0000 UTC Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.649072 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.649142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.649160 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.649185 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.649203 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.753516 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.753580 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.753593 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.753614 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.753628 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.857598 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.857674 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.857696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.857725 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.857746 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.961733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.961801 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.961819 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.961842 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:56 crc kubenswrapper[5102]: I0123 06:54:56.961860 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:56Z","lastTransitionTime":"2026-01-23T06:54:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.064768 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.064842 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.064855 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.064883 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.064898 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.167628 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.167722 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.167737 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.167756 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.167769 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.269715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.269763 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.269773 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.269788 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.269800 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.372840 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.372901 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.372922 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.372943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.372956 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.476121 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.476189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.476209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.476237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.476257 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.579986 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.580056 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.580067 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.580086 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.580098 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.597851 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.597954 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.597851 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:57 crc kubenswrapper[5102]: E0123 06:54:57.598153 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.598226 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:57 crc kubenswrapper[5102]: E0123 06:54:57.598412 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:57 crc kubenswrapper[5102]: E0123 06:54:57.598644 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:57 crc kubenswrapper[5102]: E0123 06:54:57.598773 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.629608 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-21 22:11:01.887227988 +0000 UTC Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.683099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.683246 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.683277 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.683310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.683332 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.787826 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.787896 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.787914 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.787942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.787967 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.891639 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.891698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.891722 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.891745 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.891761 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.996168 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.996277 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.996303 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.996347 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:57 crc kubenswrapper[5102]: I0123 06:54:57.996377 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:57Z","lastTransitionTime":"2026-01-23T06:54:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.099985 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.100065 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.100096 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.100143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.100165 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.204535 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.204652 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.204679 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.204724 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.204753 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.309103 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.309189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.309215 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.309245 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.309264 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.413167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.413286 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.413304 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.413330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.413349 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.516315 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.516392 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.516417 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.516446 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.516469 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.619753 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.619834 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.619857 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.619887 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.619908 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.630198 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-23 15:24:25.569714364 +0000 UTC Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.723704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.723765 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.723784 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.723810 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.723829 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.826980 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.827049 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.827071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.827102 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.827123 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.930655 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.930737 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.930762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.930847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:58 crc kubenswrapper[5102]: I0123 06:54:58.930914 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:58Z","lastTransitionTime":"2026-01-23T06:54:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.034862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.035151 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.035314 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.035464 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.035634 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.139317 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.139387 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.139408 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.139438 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.139490 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.242628 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.242688 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.242706 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.242731 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.242752 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.345775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.345849 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.345869 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.345895 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.345914 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.448920 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.448962 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.448975 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.448992 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.449003 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.551407 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.551461 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.551472 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.551491 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.551504 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.597636 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.597794 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.597679 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.597947 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.598519 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.598614 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.599101 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.599322 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.616470 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.616700 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:55:31.616649004 +0000 UTC m=+82.436997989 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.616985 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617200 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617234 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617253 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.617220 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:54:59 crc 
kubenswrapper[5102]: E0123 06:54:59.617313 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:31.617298674 +0000 UTC m=+82.437647659 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.617413 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617868 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617914 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.617940 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.618026 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:31.618000016 +0000 UTC m=+82.438349041 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.617851 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.618160 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.618417 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:31.618334586 +0000 UTC m=+82.438683711 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.618464 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: E0123 06:54:59.618786 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:31.618755628 +0000 UTC m=+82.439104803 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.619533 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\
\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.631377 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 22:28:28.794148899 +0000 UTC Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.635262 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.653188 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.654974 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.655033 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.655051 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.655073 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.655090 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.669152 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.691396 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"qua
y.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.707884 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.726975 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.751282 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.759337 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.759386 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc 
kubenswrapper[5102]: I0123 06:54:59.759396 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.759416 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.759430 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.773527 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.795504 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.815857 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.831580 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.850882 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.863392 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.863488 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.863509 5102 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.863567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.863590 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.867992 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.892664 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.904821 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:54:59Z is after 2025-08-24T17:21:41Z" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.966697 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.966811 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.966832 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.966853 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:54:59 crc kubenswrapper[5102]: I0123 06:54:59.966868 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:54:59Z","lastTransitionTime":"2026-01-23T06:54:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.069958 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.070025 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.070047 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.070076 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.070097 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.173404 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.173469 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.173486 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.173510 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.173527 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.277165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.277216 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.277229 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.277246 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.277259 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.300751 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.314833 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.321333 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": 
tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.343751 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.377340 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.380397 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.380476 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.380497 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.380531 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.380580 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.398179 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.416832 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.432615 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.447835 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.467734 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.483200 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.483264 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.483282 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.483301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.483313 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.496162 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.519168 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.539073 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.559717 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.572501 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.586231 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.586458 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.586484 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.586507 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.586521 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.587297 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.600533 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.614772 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:00Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.631499 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 09:07:47.890870422 +0000 UTC Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.688979 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.689012 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.689022 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.689037 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.689048 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.802693 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.802778 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.802800 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.802832 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.802852 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.906132 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.906197 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.906215 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.906239 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:00 crc kubenswrapper[5102]: I0123 06:55:00.906264 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:00Z","lastTransitionTime":"2026-01-23T06:55:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.009863 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.009932 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.009950 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.009978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.009995 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.102243 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.102357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.102388 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.102422 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.102445 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.125519 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:01Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.130833 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.130904 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.130923 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.130951 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.130969 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.151320 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:01Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.159254 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.159313 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.159331 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.159354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.159372 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.181310 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:01Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.186862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.186923 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.186942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.186966 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.186982 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.205405 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:01Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.211414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.211462 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.211478 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.211501 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.211518 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.232624 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:01Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:01Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.232858 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.235803 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.235862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.235883 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.235911 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.235932 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.339137 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.339211 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.339232 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.339263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.339286 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.442858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.442941 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.442967 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.442999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.443022 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.546670 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.546761 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.546788 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.546820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.546841 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.597850 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.598091 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.598155 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.597873 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.597873 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.598295 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.598435 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:01 crc kubenswrapper[5102]: E0123 06:55:01.598732 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.632256 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-29 00:35:20.897768803 +0000 UTC Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.650774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.650862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.650889 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.650920 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.650971 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.755034 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.755096 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.755139 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.755171 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.755197 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.858717 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.858784 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.858802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.858826 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.858845 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.962027 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.962093 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.962117 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.962147 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:01 crc kubenswrapper[5102]: I0123 06:55:01.962171 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:01Z","lastTransitionTime":"2026-01-23T06:55:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.066193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.066260 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.066284 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.066319 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.066345 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.169677 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.169726 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.169737 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.169755 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.169768 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.274031 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.274100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.274131 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.274156 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.274173 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.377455 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.377533 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.377608 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.377641 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.377666 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.480469 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.480568 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.480592 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.480623 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.480646 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.583505 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.583594 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.583613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.583636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.583655 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.632856 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 21:23:48.71768809 +0000 UTC Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.686874 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.686927 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.686941 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.686961 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.686977 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.789911 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.789964 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.789980 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.790000 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.790015 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.892626 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.892671 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.892680 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.892695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.892704 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.996008 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.996058 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.996076 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.996099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:02 crc kubenswrapper[5102]: I0123 06:55:02.996117 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:02Z","lastTransitionTime":"2026-01-23T06:55:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.099199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.099277 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.099299 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.099329 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.099351 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.203273 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.203351 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.203373 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.203448 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.203473 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.306572 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.306623 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.306640 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.306662 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.306679 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.409747 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.409821 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.409840 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.409869 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.409890 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.512631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.512676 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.512686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.512699 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.512710 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.565416 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.565656 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.565784 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:19.565766678 +0000 UTC m=+70.386115663 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.597744 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.597784 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.597913 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.597908 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.598471 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.598531 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.598608 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:03 crc kubenswrapper[5102]: E0123 06:55:03.598856 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.599701 5102 scope.go:117] "RemoveContainer" containerID="5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.615492 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.615598 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.615624 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.615650 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.615667 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.633119 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 11:14:22.526592628 +0000 UTC Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.718382 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.718678 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.718691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.718709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.718720 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.823639 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.823708 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.823735 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.823765 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.823789 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.926682 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.926757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.926776 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.926798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:03 crc kubenswrapper[5102]: I0123 06:55:03.926815 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:03Z","lastTransitionTime":"2026-01-23T06:55:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.063099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.063189 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.063201 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.063222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.063234 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.165945 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.165991 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.166003 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.166019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.166030 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.267977 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.268024 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.268035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.268051 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.268059 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.292735 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/1.log" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.297048 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.298312 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.324618 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f
1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.336314 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.353047 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.368515 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.370283 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.370354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.370373 5102 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.370401 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.370434 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.384702 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.401583 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.412031 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.428419 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.443401 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.458026 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.470318 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.473326 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.473374 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.473385 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.473400 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.473411 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.486067 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.514674 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.549518 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.561178 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.574907 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.576579 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.576621 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.576634 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.576651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.576799 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.586133 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:04Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.633698 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 17:55:44.792824106 +0000 UTC Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.679962 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.680014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.680032 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.680057 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 
06:55:04.680077 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.783726 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.783759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.783768 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.783782 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.783792 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.886963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.887029 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.887041 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.887060 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.887073 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.989952 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.990018 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.990035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.990059 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:04 crc kubenswrapper[5102]: I0123 06:55:04.990079 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:04Z","lastTransitionTime":"2026-01-23T06:55:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.092814 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.092871 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.092888 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.092916 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.092954 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.197128 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.197198 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.197216 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.197242 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.197290 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.300336 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.300397 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.300414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.300441 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.300459 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.304797 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/2.log" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.305703 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/1.log" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.310236 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb" exitCode=1 Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.310295 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.310348 5102 scope.go:117] "RemoveContainer" containerID="5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.311363 5102 scope.go:117] "RemoveContainer" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb" Jan 23 06:55:05 crc kubenswrapper[5102]: E0123 06:55:05.311660 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.336571 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.360492 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.388285 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.405762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.405834 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.405853 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.405886 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.405907 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.412918 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.433591 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.451123 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.473028 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.502596 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.509103 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.509148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.509164 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.509188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.509205 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.521443 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126
.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.539095 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.552479 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.571682 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.589889 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.597845 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.597890 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:05 crc kubenswrapper[5102]: E0123 06:55:05.598053 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.598218 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.598214 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:05 crc kubenswrapper[5102]: E0123 06:55:05.598486 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:05 crc kubenswrapper[5102]: E0123 06:55:05.598613 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:05 crc kubenswrapper[5102]: E0123 06:55:05.598813 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.612087 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.612155 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.612179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.612209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.612230 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.621818 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5b619e89f600d87c00526e8f2dc0b55a6684ab2d33bd58d9aee1d0236fae8a60\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"message\\\":\\\"roller.go:133] Setting up event handlers for Admin Network Policy\\\\nI0123 06:54:48.849210 6524 handler.go:190] Sending *v1.Pod event handler 3 for removal\\\\nI0123 06:54:48.849307 6524 factory.go:656] Stopping watch factory\\\\nI0123 06:54:48.849320 6524 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:54:48.849340 6524 obj_retry.go:434] periodicallyRetryResources: Retry channel got triggered: retrying failed objects of type *v1.Pod\\\\nI0123 06:54:48.849346 6524 obj_retry.go:409] Going to retry *v1.Pod resource setup for 1 objects: [openshift-multus/network-metrics-daemon-rmkhl]\\\\nI0123 06:54:48.849352 6524 obj_retry.go:418] Waiting for all the *v1.Pod retry setup to complete in iterateRetryResources\\\\nI0123 06:54:48.849365 6524 obj_retry.go:285] Attempting retry of *v1.Pod openshift-multus/network-metrics-daemon-rmkhl before timer (time: 2026-01-23 06:54:49.918149396 +0000 UTC m=+2.403199277): skip\\\\nI0123 06:54:48.849375 6524 obj_retry.go:420] Function iterateRetryResources for *v1.Pod ended (in 29.491µs)\\\\nI0123 06:54:48.849385 6524 handler.go:208] Removed *v1.Pod event handler 3\\\\nI0123 06:54:48.849394 6524 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF0123 06:54:48.849450 6524 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 
2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47e
f0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.635044 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 07:44:48.5552783 +0000 UTC Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.636668 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.649720 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.666186 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11
\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:05Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.715232 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.715293 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.715314 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.715338 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.715361 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.819199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.819272 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.819330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.819358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.819377 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.923408 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.923830 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.924062 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.924239 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:05 crc kubenswrapper[5102]: I0123 06:55:05.924400 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:05Z","lastTransitionTime":"2026-01-23T06:55:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.027798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.028284 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.028812 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.029230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.029674 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.132692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.132756 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.132774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.132804 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.132821 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.236081 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.236136 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.236153 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.236178 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.236194 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.317871 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/2.log" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.324565 5102 scope.go:117] "RemoveContainer" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb" Jan 23 06:55:06 crc kubenswrapper[5102]: E0123 06:55:06.324936 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.339988 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.340056 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.340082 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.340113 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.340137 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.353872 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.374917 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.399463 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name
\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.424296 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.441651 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.443457 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.443500 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.443509 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.443524 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.443533 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.455147 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.475272 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.493665 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.514363 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\
"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.530641 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true
,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.546586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.546712 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.546800 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.546865 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.546931 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.549492 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.568713 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.589020 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.614524 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f
1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.635723 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-18 16:59:07.80633633 +0000 UTC Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.636642 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.649505 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.650482 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.650527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.650566 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.650591 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.650608 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.660882 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:06Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.754043 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.754358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.754565 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.754717 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.754870 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.858437 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.858499 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.858517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.858571 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.858589 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.961704 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.961772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.961797 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.961833 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:06 crc kubenswrapper[5102]: I0123 06:55:06.961857 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:06Z","lastTransitionTime":"2026-01-23T06:55:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.597862 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:07 crc kubenswrapper[5102]: E0123 06:55:07.598031 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.598289 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:07 crc kubenswrapper[5102]: E0123 06:55:07.598394 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.598632 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:07 crc kubenswrapper[5102]: E0123 06:55:07.598724 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.599036 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:07 crc kubenswrapper[5102]: E0123 06:55:07.599153 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.636680 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-14 23:10:21.240529588 +0000 UTC Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.686267 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.686319 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.686333 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.686358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.686372 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:07Z","lastTransitionTime":"2026-01-23T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.789529 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.789645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.789665 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.789694 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:07 crc kubenswrapper[5102]: I0123 06:55:07.790595 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:07Z","lastTransitionTime":"2026-01-23T06:55:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:08 crc kubenswrapper[5102]: I0123 06:55:08.637621 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-04 22:40:50.067012922 +0000 UTC
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.597368 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.597423 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.597482 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:09 crc kubenswrapper[5102]: E0123 06:55:09.598255 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.597765 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:09 crc kubenswrapper[5102]: E0123 06:55:09.598397 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:09 crc kubenswrapper[5102]: E0123 06:55:09.598636 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:09 crc kubenswrapper[5102]: E0123 06:55:09.599631 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.619505 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\
"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.638750 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 02:18:07.370943362 +0000 UTC Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.640716 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.649978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.650042 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.650061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.650089 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.650107 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:09Z","lastTransitionTime":"2026-01-23T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.660773 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.682000 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.706906 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.726857 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.753625 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.753670 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.753685 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.753709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.753725 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:09Z","lastTransitionTime":"2026-01-23T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.758843 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.776965 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.793686 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.815149 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.829433 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.846306 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.856224 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.856375 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.856393 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.856474 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.856495 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:09Z","lastTransitionTime":"2026-01-23T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.869164 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 
06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.886486 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.903781 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.922971 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.941893 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:09Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.959792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.959836 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.959849 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.959866 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:09 crc kubenswrapper[5102]: I0123 06:55:09.959879 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:09Z","lastTransitionTime":"2026-01-23T06:55:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.061957 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.062030 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.062044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.062063 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.062075 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.165214 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.165265 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.165277 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.165296 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.165310 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.269249 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.269305 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.269326 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.269354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.269376 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.371956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.372032 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.372101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.372143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.372170 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.475564 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.475615 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.475631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.475653 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.475667 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.579043 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.579092 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.579103 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.579122 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.579134 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.640019 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 01:02:04.105026239 +0000 UTC
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.681794 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.681914 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.681926 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.681948 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.681963 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.784561 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.784591 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.784600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.784616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.784624 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.888190 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.888244 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.888259 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.888278 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.888291 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.990882 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.990933 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.990949 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.990973 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:10 crc kubenswrapper[5102]: I0123 06:55:10.990990 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:10Z","lastTransitionTime":"2026-01-23T06:55:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.093637 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.093776 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.093800 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.093829 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.093851 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.197966 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.198056 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.198090 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.198120 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.198141 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.300817 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.300893 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.300919 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.300949 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.300970 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.403311 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.403371 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.403392 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.403415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.403431 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.505691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.505758 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.505775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.505797 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.505815 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.591424 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.591534 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.591587 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.591612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.591631 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.604340 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.604589 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.605023 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.605191 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.605532 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.605754 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.606601 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.606768 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.618939 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:11Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.624231 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.624294 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.624319 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.624349 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.624373 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.640451 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 11:08:28.95521486 +0000 UTC Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.646618 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:11Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.651212 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.651263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.651279 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.651300 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.651311 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.670470 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:11Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.675153 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.675193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.675204 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.675222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.675234 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.692573 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:11Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.696847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.696900 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.696917 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.696941 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.696959 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.714091 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:11Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:11Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:11 crc kubenswrapper[5102]: E0123 06:55:11.714245 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.716469 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.716497 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.716509 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.716523 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.716534 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.820969 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.821023 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.821040 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.821063 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.821079 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.923656 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.923713 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.923733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.923757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:11 crc kubenswrapper[5102]: I0123 06:55:11.923775 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:11Z","lastTransitionTime":"2026-01-23T06:55:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.027161 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.027228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.027249 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.027301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.027319 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.130908 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.130955 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.130993 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.131010 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.131024 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.234709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.234819 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.234847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.234878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.234900 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.338014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.338107 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.338125 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.338149 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.338168 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.441251 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.441331 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.441356 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.441389 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.441410 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.543504 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.543557 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.543573 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.543589 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.543599 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.611600 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.641093 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-26 10:20:37.504359603 +0000 UTC Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.646606 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.646649 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.646664 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.646682 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.646694 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.750186 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.750237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.750247 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.750265 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.750276 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.852554 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.852586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.852594 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.852608 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.852619 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.955432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.955495 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.955506 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.955530 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:12 crc kubenswrapper[5102]: I0123 06:55:12.955571 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:12Z","lastTransitionTime":"2026-01-23T06:55:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.058949 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.059003 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.059016 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.059035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.059051 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:13Z","lastTransitionTime":"2026-01-23T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.162557 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.162609 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.162620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.162636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.162649 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:13Z","lastTransitionTime":"2026-01-23T06:55:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
[... the five-line status block above (NodeHasSufficientMemory, NodeHasNoDiskPressure, NodeHasSufficientPID, NodeNotReady, "Node became not ready") repeats verbatim, timestamps aside, at 06:55:13.265894, 06:55:13.369855, 06:55:13.473565, and 06:55:13.576316 ...]
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.597735 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.597770 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.597822 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.597868 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:13 crc kubenswrapper[5102]: E0123 06:55:13.598025 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:13 crc kubenswrapper[5102]: E0123 06:55:13.598163 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:13 crc kubenswrapper[5102]: E0123 06:55:13.598277 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:13 crc kubenswrapper[5102]: E0123 06:55:13.598360 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:13 crc kubenswrapper[5102]: I0123 06:55:13.641969 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 16:55:55.691927917 +0000 UTC
[... the five-line status block repeats at 06:55:13.679872 ...]
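The four pods failing to sync above (network-check-source, network-check-target, network-metrics-daemon-rmkhl, networking-console-plugin) are the ones that need a CNI-provided pod network rather than host networking, so they cannot get sandboxes until a network plugin writes a config into the directory named in the error. The same condition can be confirmed from outside the log; the commands below are an illustrative sketch, not part of the capture, and assume oc access to this cluster plus a shell on the node:

    # Read the Ready condition the kubelet is publishing (reason recorded above: KubeletNotReady).
    oc get node crc -o jsonpath='{.status.conditions[?(@.type=="Ready")].reason}'
    # The directory the error message points at; the loop above implies it is still empty.
    ls /etc/kubernetes/cni/net.d/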
[... the five-line status block repeats at 06:55:13.783242, 06:55:13.885790, 06:55:13.988061, 06:55:14.090867, 06:55:14.193464, 06:55:14.295664, 06:55:14.398135, 06:55:14.501039, and 06:55:14.603720 ...]
Jan 23 06:55:14 crc kubenswrapper[5102]: I0123 06:55:14.642137 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 23:56:38.276530359 +0000 UTC
[... the five-line status block repeats at 06:55:14.706029, 06:55:14.808669, 06:55:14.911413, 06:55:15.014272, 06:55:15.116648, 06:55:15.219057, 06:55:15.321971, 06:55:15.425356, and 06:55:15.529771 ...]
Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.597581 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.597644 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.597607 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.597734 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:15 crc kubenswrapper[5102]: E0123 06:55:15.597819 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:15 crc kubenswrapper[5102]: E0123 06:55:15.597760 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:15 crc kubenswrapper[5102]: E0123 06:55:15.597964 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
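This loop ends once the container runtime can load a valid network config from /etc/kubernetes/cni/net.d/; here that config is eventually written by the cluster's own network stack (the openshift-multus pod above is part of it). Purely as a sketch of the shape the runtime is waiting for, a minimal conflist could look like the following; the file name, network name, and the bridge/host-local plugin choice are illustrative assumptions, not taken from this cluster:

    # Hypothetical example only; any valid conflist in the watched directory
    # should let the runtime report NetworkReady=true again.
    $ cat /etc/kubernetes/cni/net.d/10-example.conflist
    {
      "cniVersion": "0.3.1",
      "name": "example-net",
      "plugins": [
        {
          "type": "bridge",
          "bridge": "cni0",
          "isGateway": true,
          "ipMasq": true,
          "ipam": { "type": "host-local", "subnet": "10.88.0.0/16" }
        }
      ]
    }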
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:15 crc kubenswrapper[5102]: E0123 06:55:15.598143 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.633284 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.633339 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.633352 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.633371 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.633385 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:15Z","lastTransitionTime":"2026-01-23T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.642307 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 12:22:46.955930982 +0000 UTC Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.737024 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.737111 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.737128 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.737181 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.737196 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:15Z","lastTransitionTime":"2026-01-23T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.840083 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.840143 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.840161 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.840188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.840207 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:15Z","lastTransitionTime":"2026-01-23T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.942951 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.943024 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.943051 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.943083 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:15 crc kubenswrapper[5102]: I0123 06:55:15.943114 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:15Z","lastTransitionTime":"2026-01-23T06:55:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.046263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.046301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.046313 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.046329 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.046341 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.149161 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.149197 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.149208 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.149223 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.149235 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.253521 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.253604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.253620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.253645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.253669 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.355862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.355907 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.355951 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.356193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.356228 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.458454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.458484 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.458493 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.458506 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.458514 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.560768 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.560796 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.560803 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.560817 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.560827 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.642648 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 14:21:29.338333967 +0000 UTC Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.664437 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.664482 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.664505 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.664564 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.664588 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.768269 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.768318 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.768326 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.768342 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.768353 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.871176 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.871235 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.871249 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.871270 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.871288 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.974035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.974071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.974082 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.974101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:16 crc kubenswrapper[5102]: I0123 06:55:16.974114 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:16Z","lastTransitionTime":"2026-01-23T06:55:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.077183 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.077259 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.077278 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.077306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.077324 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:17Z","lastTransitionTime":"2026-01-23T06:55:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
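The certificate_manager lines above recur roughly once per second with the same expiration (2026-02-24 05:53:03) but a different rotation deadline each time; the scatter in this capture runs from 2025-11-27 to 2026-01-11. That is consistent with the kubelet's certificate manager re-rolling a jittered deadline inside the tail of the certificate's validity on each evaluation, so by itself it is not a sign of trouble. To eyeball the spread in a saved capture (hypothetical command; the file name kubelet.log is an assumption):

    # List the distinct rotation deadlines seen in the capture.
    grep -o 'rotation deadline is [0-9-]\+ [0-9:.]\+' kubelet.log | sort -u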
[... the five-line status block repeats at 06:55:17.180441, 06:55:17.283104, 06:55:17.385206, 06:55:17.487686, and 06:55:17.590861 ...]
Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.597596 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.597656 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.597668 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.597602 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:17 crc kubenswrapper[5102]: E0123 06:55:17.597788 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:17 crc kubenswrapper[5102]: E0123 06:55:17.597898 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:17 crc kubenswrapper[5102]: E0123 06:55:17.597964 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:17 crc kubenswrapper[5102]: E0123 06:55:17.598020 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:17 crc kubenswrapper[5102]: I0123 06:55:17.643281 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-02 13:36:48.547915701 +0000 UTC
[... the five-line status block repeats at 06:55:17.694646, 06:55:17.797983, 06:55:17.900160, 06:55:18.002981, 06:55:18.106056, 06:55:18.208626, 06:55:18.311089, 06:55:18.413025, and 06:55:18.515921 ...]
Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.618104 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.618167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.618179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.618199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.618209 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:18Z","lastTransitionTime":"2026-01-23T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.643609 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-12 15:10:39.490960373 +0000 UTC Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.721600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.721654 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.721676 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.721695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.721707 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:18Z","lastTransitionTime":"2026-01-23T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.824622 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.824667 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.824676 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.824692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.824708 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:18Z","lastTransitionTime":"2026-01-23T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.928496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.928582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.928600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.928620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:18 crc kubenswrapper[5102]: I0123 06:55:18.928639 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:18Z","lastTransitionTime":"2026-01-23T06:55:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.031576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.031632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.031650 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.031671 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.031688 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.134394 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.134437 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.134448 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.134462 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.134472 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.237438 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.237513 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.237570 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.237602 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.237625 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.339990 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.340069 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.340094 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.340127 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.340150 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.442820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.442872 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.442883 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.442897 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.442939 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.544814 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.544877 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.544893 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.544918 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.544936 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.597296 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.597423 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.597318 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.597328 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.597291 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.598114 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.598200 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.598242 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.598777 5102 scope.go:117] "RemoveContainer" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.599099 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.611596 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc 
kubenswrapper[5102]: I0123 06:55:19.632131 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v
4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"
cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.637775 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.638244 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:55:19 crc kubenswrapper[5102]: E0123 06:55:19.638463 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:55:51.638426817 +0000 UTC m=+102.458775992 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.643883 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 19:28:06.938030962 +0000 UTC Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.647532 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1
af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.649674 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.649728 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.649738 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.649752 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.649761 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.661291 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.678076 5102 
status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf
767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.693952 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.711307 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.722615 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\
":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.732849 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.744253 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.752357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.752403 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.752416 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.752432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.752445 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.768891 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.779777 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.793727 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.808081 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: 
certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.825007 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.840687 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.854794 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.854853 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.854868 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.854889 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.854904 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.856118 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.872674 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:19Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.958165 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.958254 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.958272 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.958296 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:19 crc kubenswrapper[5102]: I0123 06:55:19.958314 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:19Z","lastTransitionTime":"2026-01-23T06:55:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.061237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.061286 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.061300 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.061317 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.061330 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.163311 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.163334 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.163343 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.163355 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.163363 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.265236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.265276 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.265287 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.265302 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.265312 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.367463 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.367532 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.367592 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.367622 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.367647 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.470775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.470820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.470835 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.470867 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.470884 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.574411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.574454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.574466 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.574482 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.574493 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.645044 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-06 07:49:25.086619818 +0000 UTC Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.677620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.677851 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.678045 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.678191 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.678333 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.781491 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.781555 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.781567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.781584 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.781596 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.884180 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.884230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.884248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.884270 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.884287 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.987038 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.987110 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.987121 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.987138 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:20 crc kubenswrapper[5102]: I0123 06:55:20.987150 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:20Z","lastTransitionTime":"2026-01-23T06:55:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.089645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.089675 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.089684 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.089698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.089707 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.192108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.192152 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.192163 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.192179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.192190 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.294891 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.294985 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.295004 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.295025 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.295041 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.397087 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.397117 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.397126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.397140 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.397149 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.500829 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.500926 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.500949 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.501014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.501036 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.597887 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.597932 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.597965 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:21 crc kubenswrapper[5102]: E0123 06:55:21.597995 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.598054 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:21 crc kubenswrapper[5102]: E0123 06:55:21.598135 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:21 crc kubenswrapper[5102]: E0123 06:55:21.598154 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:21 crc kubenswrapper[5102]: E0123 06:55:21.598198 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.603570 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.603604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.603615 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.603629 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.603639 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.646261 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 11:16:16.37285322 +0000 UTC Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.706268 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.706300 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.706308 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.706322 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.706348 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.808441 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.808513 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.808529 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.808582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.808646 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.911435 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.911471 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.911481 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.911498 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:21 crc kubenswrapper[5102]: I0123 06:55:21.911508 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:21Z","lastTransitionTime":"2026-01-23T06:55:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.013744 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.013791 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.013802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.013819 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.013829 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.080862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.080963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.080982 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.081008 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.081026 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.099503 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.103881 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.103963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.103979 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.103995 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.104038 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.120939 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.124574 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.124622 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.124634 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.124651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.124664 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.136117 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.141527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.141616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.141633 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.141659 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.141676 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.153174 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.157657 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.157708 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.157737 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.157762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.157783 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.168982 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: E0123 06:55:22.169300 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.171930 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.171981 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.171997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.172021 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.172038 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.274790 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.274847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.274867 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.274892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.274910 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.377802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.377837 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.377846 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.377858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.377868 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.380075 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/0.log" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.380116 5102 generic.go:334] "Generic (PLEG): container finished" podID="c1446a26-ae38-40f3-a313-8604f5e98285" containerID="006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af" exitCode=1 Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.380151 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerDied","Data":"006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.380576 5102 scope.go:117] "RemoveContainer" containerID="006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.400395 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.413458 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.431648 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.448410 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"
name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eea
de0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54
:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.460413 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.469569 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\
\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.481290 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.481327 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.481338 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.481353 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.481364 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.486924 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.498619 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.512873 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.523708 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.541669 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.556445 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.567946 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:22Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon 
started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.577766 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.585157 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.585193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.585205 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.585222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.585233 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.587080 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.597977 5102 status_manager.go:875] "Failed to update 
status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.611294 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.628762 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f
1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:22Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.646687 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-17 17:51:45.529975338 +0000 UTC Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.688051 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.688084 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.688093 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.688108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.688119 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.791722 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.791792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.791815 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.791844 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.791865 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.893943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.893999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.894015 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.894037 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.894052 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.996666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.996715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.996728 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.996746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:22 crc kubenswrapper[5102]: I0123 06:55:22.996760 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:22Z","lastTransitionTime":"2026-01-23T06:55:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.099354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.099397 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.099406 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.099517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.099528 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.202453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.202517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.202565 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.202592 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.202616 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.305763 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.305814 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.305828 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.305846 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.305859 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.389051 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/0.log" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.389149 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerStarted","Data":"0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.402791 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.408330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.408376 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.408396 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.408418 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.408434 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.423879 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.450898 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveRea
dOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93
d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 
2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"rec
ursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.463125 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.475204 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.488223 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.501223 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.511533 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.511611 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.511625 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.511643 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.511655 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.516014 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.528099 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.543112 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.557405 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.567703 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.582805 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.595623 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.597584 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.597625 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.597678 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.597584 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:23 crc kubenswrapper[5102]: E0123 06:55:23.597718 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:23 crc kubenswrapper[5102]: E0123 06:55:23.597797 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:23 crc kubenswrapper[5102]: E0123 06:55:23.597883 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:23 crc kubenswrapper[5102]: E0123 06:55:23.597930 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.607030 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\
\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643105 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643744 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643753 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643767 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.643776 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.647001 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-08 19:18:13.317604813 +0000 UTC Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.661111 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.674102 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:23Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.747280 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.747357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.747375 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.747402 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.747420 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.850686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.850733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.850741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.850755 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.850765 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.953236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.953294 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.953310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.953333 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:23 crc kubenswrapper[5102]: I0123 06:55:23.953351 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:23Z","lastTransitionTime":"2026-01-23T06:55:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:24 crc kubenswrapper[5102]: I0123 06:55:24.647994 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-08 09:05:11.978224967 +0000 UTC
Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.597339 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.597421 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.597480 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:25 crc kubenswrapper[5102]: E0123 06:55:25.597683 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.597715 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:25 crc kubenswrapper[5102]: E0123 06:55:25.597846 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:25 crc kubenswrapper[5102]: E0123 06:55:25.597972 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:25 crc kubenswrapper[5102]: E0123 06:55:25.598166 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.612100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.612148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.612206 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.612229 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.612247 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:25Z","lastTransitionTime":"2026-01-23T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.648130 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 02:05:56.884671654 +0000 UTC Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.715230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.715301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.715316 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.715340 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:25 crc kubenswrapper[5102]: I0123 06:55:25.715355 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:25Z","lastTransitionTime":"2026-01-23T06:55:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:26 crc kubenswrapper[5102]: I0123 06:55:26.649091 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-18 16:12:03.803062627 +0000 UTC
Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.597362 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.597390 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.597416 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:27 crc kubenswrapper[5102]: E0123 06:55:27.597629 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.597720 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:27 crc kubenswrapper[5102]: E0123 06:55:27.597817 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:27 crc kubenswrapper[5102]: E0123 06:55:27.597937 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:27 crc kubenswrapper[5102]: E0123 06:55:27.598123 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.649969 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-16 11:52:11.935611668 +0000 UTC Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.680970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.681034 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.681049 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.681075 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.681094 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:27Z","lastTransitionTime":"2026-01-23T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.784619 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.784714 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.784733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.784758 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:27 crc kubenswrapper[5102]: I0123 06:55:27.784777 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:27Z","lastTransitionTime":"2026-01-23T06:55:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.651133 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 17:00:55.712362861 +0000 UTC
Has your network provider started?"} Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.923164 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.923262 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.923281 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.923304 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:28 crc kubenswrapper[5102]: I0123 06:55:28.923321 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:28Z","lastTransitionTime":"2026-01-23T06:55:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.025919 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.025971 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.025988 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.026010 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.026026 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.129787 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.129865 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.129882 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.129905 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.129921 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.233321 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.233398 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.233421 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.233453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.233477 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.336774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.336938 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.337019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.337055 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.337076 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.438908 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.438970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.438988 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.439016 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.439040 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.541508 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.541620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.541645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.541676 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.541704 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.598047 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.598126 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.598291 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:29 crc kubenswrapper[5102]: E0123 06:55:29.598375 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.598391 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:29 crc kubenswrapper[5102]: E0123 06:55:29.598511 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:29 crc kubenswrapper[5102]: E0123 06:55:29.598605 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:29 crc kubenswrapper[5102]: E0123 06:55:29.598823 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.615521 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.635688 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.644584 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.644658 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.644681 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.644715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.644740 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.652252 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-24 02:13:40.217811507 +0000 UTC Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.658263 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.678508 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.694048 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.712261 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.740909 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f
1f3f34694b713898f4f0e1bb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.752958 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.768498 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.785449 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.799191 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.811720 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.819519 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.819576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.819586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.819600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.819610 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.826679 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCo
unt\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.841967 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedA
t\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 
06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.855910 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.867153 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.886440 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.896817 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:29Z is after 2025-08-24T17:21:41Z" Jan 23 
06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.921496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.921554 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.921566 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.921583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:29 crc kubenswrapper[5102]: I0123 06:55:29.921594 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:29Z","lastTransitionTime":"2026-01-23T06:55:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.023965 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.024017 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.024051 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.024086 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.024196 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.126627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.126687 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.126706 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.126762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.126780 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.230072 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.230148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.230187 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.230218 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.230242 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.332992 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.333079 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.333099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.333127 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.333154 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.435672 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.435746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.435805 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.435835 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.435858 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.539177 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.539233 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.539253 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.539276 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.539293 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.641984 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.642038 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.642052 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.642071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.642085 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.652795 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-02 10:28:37.624628789 +0000 UTC Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.745330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.745390 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.745408 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.745432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.745452 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.848415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.848482 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.848498 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.848524 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.848567 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.951823 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.951908 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.951942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.951972 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:30 crc kubenswrapper[5102]: I0123 06:55:30.951994 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:30Z","lastTransitionTime":"2026-01-23T06:55:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.054880 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.054995 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.055036 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.055070 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.055097 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.158157 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.158233 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.158256 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.158288 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.158310 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.261088 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.261134 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.261146 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.261165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.261175 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.364062 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.364105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.364117 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.364130 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.364139 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.466624 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.466692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.466710 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.466738 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.466756 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.569989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.570066 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.570089 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.570123 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.570150 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.597560 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.597573 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.597697 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.598068 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.598115 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.598565 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.598769 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.598890 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.599104 5102 scope.go:117] "RemoveContainer" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.631680 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.631898 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.631964 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:35.631924743 +0000 UTC m=+146.452273758 (durationBeforeRetry 1m4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.632020 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.632108 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632164 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632225 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632251 5102 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632297 5102 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632323 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:56:35.632304255 +0000 UTC m=+146.452653260 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632373 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2026-01-23 06:56:35.632352117 +0000 UTC m=+146.452701132 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632255 5102 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632458 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2026-01-23 06:56:35.632439709 +0000 UTC m=+146.452788724 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.632175 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632732 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632893 5102 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.632927 5102 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:55:31 crc kubenswrapper[5102]: E0123 06:55:31.633233 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2026-01-23 06:56:35.633184693 +0000 UTC m=+146.453533998 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered]
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.653107 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 21:58:37.435480459 +0000 UTC
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.673494 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.673597 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.673620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.673650 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.673678 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.776047 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.776786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.776802 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.777582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.777646 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.880647 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.880702 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.880715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.880731 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.880743 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.983185 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.983243 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.983253 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.983276 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:31 crc kubenswrapper[5102]: I0123 06:55:31.983287 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:31Z","lastTransitionTime":"2026-01-23T06:55:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.086922 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.086982 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.086995 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.087011 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.087022 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.189699 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.189729 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.189736 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.189750 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.189758 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.335275 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.335328 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.335341 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.335359 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.335371 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.428328 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/2.log"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.432203 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.432770 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.437992 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.438035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.438046 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.438061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.438073 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.455112 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.482406 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.492574 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.492608 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.492617 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.492630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.492639 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.512214 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.518442 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.529320 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.529388 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.529417 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.529440 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.529457 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.531466 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.547899 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.552503 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.552569 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.552583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.552604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.552622 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.553129 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263
abdf480d4d6fe232e98b8741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 
2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{
\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.565027 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.565013 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\
"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":45063
7738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.569264 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.569300 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.569315 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.569331 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.569341 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.574577 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.583149 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status 
\"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae
669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-r
elease-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-
art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.587052 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},
\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.601370 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.617239 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cl
uster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.629280 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.1
68.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.644597 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\
\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.645961 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.646019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.646033 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.646049 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.646060 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.654367 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 14:25:47.194524021 +0000 UTC Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.660012 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID 
available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:32Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056
b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951
},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate 
has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: E0123 06:55:32.660169 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.661719 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.661758 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.661774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.661792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.661805 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.663679 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.681911 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.699636 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.712769 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.730061 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z
\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.764242 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.764284 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.764292 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.764306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.764316 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.785571 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:32Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.866627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.866677 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.866689 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.866707 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.866719 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.969234 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.969303 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.969326 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.969350 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:32 crc kubenswrapper[5102]: I0123 06:55:32.969365 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:32Z","lastTransitionTime":"2026-01-23T06:55:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.072688 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.072755 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.072772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.072798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.072817 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.175786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.176038 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.176059 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.176087 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.176105 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.278958 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.279023 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.279045 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.279070 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.279089 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.381691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.381743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.381754 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.381772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.381784 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.439044 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/3.log"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.440897 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/2.log"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.445630 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741" exitCode=1
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.445710 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.445839 5102 scope.go:117] "RemoveContainer" containerID="3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.447150 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741"
Jan 23 06:55:33 crc kubenswrapper[5102]: E0123 06:55:33.447488 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.476846 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3aab3d6d0f5f744e6dc278a4444bd444f9647f3f1f3f34694b713898f4f0e1bb\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:04Z\\\",\\\"message\\\":\\\"e (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI0123 06:55:04.762301 6711 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI0123 06:55:04.762356 6711 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI0123 06:55:04.762392 6711 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI0123 06:55:04.762408 6711 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI0123 06:55:04.762407 6711 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI0123 06:55:04.762437 6711 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI0123 06:55:04.762458 6711 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI0123 06:55:04.762461 6711 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI0123 06:55:04.762492 6711 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI0123 06:55:04.762506 6711 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI0123 06:55:04.762566 6711 factory.go:656] Stopping watch factory\\\\nI0123 06:55:04.762577 6711 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI0123 06:55:04.762588 6711 ovnkube.go:599] Stopped ovnkube\\\\nI0123 06:55:04.762595 6711 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI0123 06:55:04.762609 6711 handler.go:208] Removed *v1.Node event handler 2\\\\nI01\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:03Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:33Z\\\",\\\"message\\\":\\\"getPort:{0 10259 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{scheduler: 
true,},ClusterIP:10.217.4.169,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.169],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0123 06:55:33.074157 7130 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fbvf7 in node crc\\\\nF0123 06:55:33.074002 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-nod\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f
36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.485900 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.485962 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.485980 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.486007 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.486028 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.497485 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.516312 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.535828 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.551999 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.571653 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.584174 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\"
:[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.589427 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.589475 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.589490 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.589513 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.589528 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.597827 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.597895 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.597955 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.597960 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:33 crc kubenswrapper[5102]: E0123 06:55:33.598156 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:33 crc kubenswrapper[5102]: E0123 06:55:33.598495 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:33 crc kubenswrapper[5102]: E0123 06:55:33.598689 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:33 crc kubenswrapper[5102]: E0123 06:55:33.598803 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.602879 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e9116
99a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.624146 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.645522 5102 
status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c
857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-
release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.654465 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 07:47:51.321884192 +0000 
UTC Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.660898 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.675872 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.691716 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-
resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.692935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.692994 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.693014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.693042 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.693061 5102 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.708423 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.723025 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.737876 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.756774 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.773634 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:33Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.796479 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.796582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.796651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.796691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.796711 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.900365 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.900416 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.900428 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.900446 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:33 crc kubenswrapper[5102]: I0123 06:55:33.900462 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:33Z","lastTransitionTime":"2026-01-23T06:55:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.003447 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.003832 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.003862 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.008695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.008748 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.113091 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.113438 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.113581 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.113702 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.113846 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.217913 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.217982 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.218001 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.218033 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.218050 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.321003 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.321064 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.321082 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.321106 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.321123 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.424855 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.424934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.424956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.424985 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.425004 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.451837 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/3.log" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.457025 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741" Jan 23 06:55:34 crc kubenswrapper[5102]: E0123 06:55:34.457261 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.474322 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.494089 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.513026 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.528372 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.528442 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.528460 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.528489 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.528510 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.542868 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\
":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.i
o/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:33Z\\\",\\\"message\\\":\\\"getPort:{0 10259 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{scheduler: true,},ClusterIP:10.217.4.169,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.169],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0123 06:55:33.074157 7130 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fbvf7 in node crc\\\\nF0123 06:55:33.074002 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-nod\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.557816 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.579811 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.598627 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.618715 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.631306 5102 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.631347 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.631363 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.631386 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.631402 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.636303 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha2
56:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.652876 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"image
ID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.654937 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 18:33:29.404564059 +0000 UTC Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.675609 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc
-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 
06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.695474 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.710182 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.731213 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.734582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.734646 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc 
kubenswrapper[5102]: I0123 06:55:34.734665 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.734688 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.734705 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.751753 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\
"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.773749 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.793879 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.809087 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:34Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.837061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.837119 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.837132 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.837152 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.837165 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.940222 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.940671 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.940875 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.941046 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:34 crc kubenswrapper[5102]: I0123 06:55:34.941222 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:34Z","lastTransitionTime":"2026-01-23T06:55:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.044492 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.044582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.044602 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.044627 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.044645 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.148086 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.148178 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.148200 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.148236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.148263 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.251624 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.251693 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.251716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.251746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.251769 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.355250 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.355322 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.355340 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.355370 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.355394 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.493798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.493859 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.493874 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.493898 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.493912 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.596971 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597034 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597092 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597113 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597199 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597231 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597250 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:35 crc kubenswrapper[5102]: E0123 06:55:35.597442 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.597534 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:35 crc kubenswrapper[5102]: E0123 06:55:35.597667 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:35 crc kubenswrapper[5102]: E0123 06:55:35.597829 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:35 crc kubenswrapper[5102]: E0123 06:55:35.598064 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.655408 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-20 03:06:52.418925719 +0000 UTC Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.699643 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.699717 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.699733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.699757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.699782 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.803439 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.803514 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.803596 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.803631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.803650 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.907234 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.907278 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.907289 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.907305 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:35 crc kubenswrapper[5102]: I0123 06:55:35.907316 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:35Z","lastTransitionTime":"2026-01-23T06:55:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.010668 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.010728 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.010750 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.010776 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.010798 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.113567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.113631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.113643 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.113659 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.113670 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.216197 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.216260 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.216282 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.216310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.216330 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.319496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.319621 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.319651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.319680 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.319698 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.422417 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.422469 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.422483 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.422501 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.422515 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.525481 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.525609 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.525638 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.525670 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.525694 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.628235 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.628322 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.628345 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.628377 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.628399 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.655628 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-12 00:45:38.084509597 +0000 UTC Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.731691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.731756 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.731771 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.731794 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.731811 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.836393 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.836500 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.836562 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.836592 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.836614 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.940834 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.941023 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.941063 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.941100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:36 crc kubenswrapper[5102]: I0123 06:55:36.941147 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:36Z","lastTransitionTime":"2026-01-23T06:55:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.044920 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.044977 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.044990 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.045007 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.045023 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.148743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.148805 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.148822 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.148849 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.148867 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.251557 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.251616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.251624 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.251639 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.251649 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.354792 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.354917 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.354935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.355001 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.355020 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.458751 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.458831 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.458851 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.458878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.458896 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.562325 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.562419 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.562443 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.562475 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.562500 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.597363 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.597436 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.597436 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:37 crc kubenswrapper[5102]: E0123 06:55:37.597517 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.597632 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:37 crc kubenswrapper[5102]: E0123 06:55:37.597780 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:37 crc kubenswrapper[5102]: E0123 06:55:37.598138 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:37 crc kubenswrapper[5102]: E0123 06:55:37.598246 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.656583 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 23:39:51.174768973 +0000 UTC Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.665636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.665698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.665716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.665741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.665760 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.768724 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.768780 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.768798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.768820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.768837 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.872334 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.872382 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.872394 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.872411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.872419 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.976323 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.976429 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.976461 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.976498 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:37 crc kubenswrapper[5102]: I0123 06:55:37.976534 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:37Z","lastTransitionTime":"2026-01-23T06:55:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.079957 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.080044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.080099 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.080136 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.080153 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.183251 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.183310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.183327 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.183352 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.183370 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.286567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.287083 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.287105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.287130 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.287152 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.390432 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.390951 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.390983 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.391009 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.391025 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.494150 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.494213 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.494227 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.494250 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.494263 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.597390 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.597487 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.597515 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.597600 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.597628 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.657179 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 20:23:14.186000996 +0000 UTC Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.701532 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.701646 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.701666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.701692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.701709 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.805422 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.805501 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.805523 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.805583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.805608 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.908286 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.908342 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.908358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.908381 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:38 crc kubenswrapper[5102]: I0123 06:55:38.908394 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:38Z","lastTransitionTime":"2026-01-23T06:55:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.010913 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.010973 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.010997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.011030 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.011055 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.114567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.114658 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.114670 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.114690 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.114704 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.217306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.217386 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.217409 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.217440 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.217463 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.320631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.320706 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.320729 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.320762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.320783 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.424579 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.424639 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.424656 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.424684 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.424700 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.527310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.527366 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.527382 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.527404 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.527421 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.597515 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.597622 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.597699 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:39 crc kubenswrapper[5102]: E0123 06:55:39.597822 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.597885 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:39 crc kubenswrapper[5102]: E0123 06:55:39.598027 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:39 crc kubenswrapper[5102]: E0123 06:55:39.598456 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:39 crc kubenswrapper[5102]: E0123 06:55:39.598512 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.620612 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.630349 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.630411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.630430 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.630458 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.630475 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.641005 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.658325 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-08 18:44:55.33278584 +0000 UTC Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.671472 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263
abdf480d4d6fe232e98b8741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:33Z\\\",\\\"message\\\":\\\"getPort:{0 10259 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{scheduler: true,},ClusterIP:10.217.4.169,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.169],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0123 06:55:33.074157 7130 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fbvf7 in node crc\\\\nF0123 06:55:33.074002 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-nod\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.686949 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.715631 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod 
\"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.732014 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\
\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.734599 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.734644 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.734656 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.734675 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.734686 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.744289 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.756862 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.770742 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.790219 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.803709 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.817444 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.832747 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.836717 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.836772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc 
kubenswrapper[5102]: I0123 06:55:39.836786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.836805 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.836820 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.843858 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:5
4:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.856133 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\
\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.870022 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.883430 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.900518 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:39Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.945965 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.946061 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.946080 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.946108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:39 crc kubenswrapper[5102]: I0123 06:55:39.946125 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:39Z","lastTransitionTime":"2026-01-23T06:55:39Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.049777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.049894 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.049922 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.049953 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.049977 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.152576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.152635 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.152654 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.152679 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.152699 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.255416 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.255477 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.255496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.255520 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.255564 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.358681 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.358741 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.358759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.358784 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.358802 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.461696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.461754 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.461774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.461796 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.461814 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.564640 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.564724 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.564745 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.564777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.564798 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.658476 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-16 19:18:58.127771596 +0000 UTC Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.667976 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.668059 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.668082 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.668112 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.668130 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.770418 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.770486 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.770508 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.770579 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.770609 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.873144 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.873224 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.873248 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.873289 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.873310 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.976805 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.976856 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.976867 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.976882 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:40 crc kubenswrapper[5102]: I0123 06:55:40.976893 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:40Z","lastTransitionTime":"2026-01-23T06:55:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.079916 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.079978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.079994 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.080019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.080036 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.183240 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.183310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.183329 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.183362 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.183383 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.286606 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.286700 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.286727 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.286762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.286800 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.389463 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.389526 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.389597 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.389635 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.389653 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.493086 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.493165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.493180 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.493199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.493210 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.596095 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.596152 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.596167 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.596188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.596201 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.597298 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.597335 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:41 crc kubenswrapper[5102]: E0123 06:55:41.597426 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.597304 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.597478 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:41 crc kubenswrapper[5102]: E0123 06:55:41.597648 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:41 crc kubenswrapper[5102]: E0123 06:55:41.597685 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:41 crc kubenswrapper[5102]: E0123 06:55:41.597776 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.658831 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-12 22:18:33.596413838 +0000 UTC Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.699564 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.699613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.699629 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.699653 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.699667 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.802874 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.802937 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.802954 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.802978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.803002 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.905956 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.906011 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.906020 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.906035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:41 crc kubenswrapper[5102]: I0123 06:55:41.906046 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:41Z","lastTransitionTime":"2026-01-23T06:55:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.009505 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.009628 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.009652 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.009751 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.009778 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.113686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.113743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.113758 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.113778 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.113792 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.218280 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.218328 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.218339 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.218358 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.218373 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.322508 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.322602 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.322620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.322645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.322667 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.425362 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.425430 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.425448 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.425473 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.425491 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.528525 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.528579 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.528590 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.528604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.528614 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.631172 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.631568 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.631665 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.631760 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.631845 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.658933 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-19 07:21:11.891327654 +0000 UTC Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.734503 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.734560 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.734569 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.734584 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.734597 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.837592 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.837673 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.837696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.837729 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.837752 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.855294 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.855376 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.855394 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.855420 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.855438 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.876049 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"},\\\"runtimeHandlers\\\":[{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":false},\\\"name\\\":\\\"runc\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"crun\\\"},{\\\"features\\\":{\\\"recursiveReadOnlyMounts\\\":true,\\\"userNamespaces\\\":true},\\\"name\\\":\\\"\\\"}]}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:42Z is after 
2025-08-24T17:21:41Z" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.883666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.883777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.883849 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.883885 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.883950 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.905683 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.911296 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.911356 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.911381 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.911411 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.911433 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.926814 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.931461 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.931527 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.931586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.931617 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.931641 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.947849 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.952238 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.952289 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.952307 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.952330 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.952346 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.971855 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:42Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
2025-08-24T17:21:41Z" Jan 23 06:55:42 crc kubenswrapper[5102]: E0123 06:55:42.972135 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.974331 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.974394 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.974413 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.974439 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:42 crc kubenswrapper[5102]: I0123 06:55:42.974458 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:42Z","lastTransitionTime":"2026-01-23T06:55:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.076930 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.077275 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.077390 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.077472 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.077587 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:43Z","lastTransitionTime":"2026-01-23T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.180057 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.180104 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.180119 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.180141 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.180155 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:43Z","lastTransitionTime":"2026-01-23T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.283454 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.283535 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.283581 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.283608 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.283625 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:43Z","lastTransitionTime":"2026-01-23T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.386372 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.386426 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.386437 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.386456 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.386468 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:43Z","lastTransitionTime":"2026-01-23T06:55:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.597989 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.597985 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.598234 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.598273 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:43 crc kubenswrapper[5102]: E0123 06:55:43.598380 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:43 crc kubenswrapper[5102]: E0123 06:55:43.598498 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:43 crc kubenswrapper[5102]: E0123 06:55:43.598701 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:43 crc kubenswrapper[5102]: E0123 06:55:43.598879 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:43 crc kubenswrapper[5102]: I0123 06:55:43.659981 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-18 16:43:29.699443971 +0000 UTC
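
Every "Error syncing pod, skipping" entry above carries the same root cause: no CNI network config (*.conf or *.conflist) exists yet in /etc/kubernetes/cni/net.d/, so the runtime reports NetworkReady=false and pods that need a network sandbox cannot start. On an OpenShift/CRC node that file is normally written by the cluster network operator once it comes up, so the condition clears on its own. Purely to illustrate what the runtime looks for, here is a Go sketch that installs a generic bridge conflist; the file name, network name, and subnet are assumptions, not what OpenShift installs:

// Illustration only: write a minimal CNI 1.0 conflist into the directory
// named in the log. The bridge + host-local values are assumed examples.
package main

import (
	"log"
	"os"
	"path/filepath"
)

const confDir = "/etc/kubernetes/cni/net.d" // directory from the log message

const conflist = `{
  "cniVersion": "1.0.0",
  "name": "example-net",
  "plugins": [
    {
      "type": "bridge",
      "bridge": "cni0",
      "isGateway": true,
      "ipMasq": true,
      "ipam": {
        "type": "host-local",
        "subnet": "10.88.0.0/16"
      }
    }
  ]
}`

func main() {
	if err := os.MkdirAll(confDir, 0o755); err != nil {
		log.Fatal(err)
	}
	// The lexically-first config file in the directory is typically the one used.
	path := filepath.Join(confDir, "10-example.conflist")
	if err := os.WriteFile(path, []byte(conflist), 0o644); err != nil {
		log.Fatal(err)
	}
	log.Printf("wrote %s; kubelet should report NetworkReady=true shortly", path)
}

Pods on the host network do not need a CNI sandbox, which is consistent with only these four pods being skipped while the rest of the node keeps syncing.
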
Jan 23 06:55:44 crc kubenswrapper[5102]: I0123 06:55:44.661477 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-11 03:20:53.191255687 +0000 UTC
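
Each certificate_manager.go line reports a different rotation deadline (2025-11-18, then 2025-11-11, and so on against the same 2026-02-24 expiry) because client-go draws a fresh jittered deadline inside the tail of the certificate's validity on every evaluation. A sketch of that computation, assuming the upstream 0.7 base and 0.2 jitter factor and a one-year validity; both are assumptions, neither is stated in this log:

// Sketch of a jittered rotation deadline: pick a random point in roughly the
// last third of the certificate's validity, notBefore + [0.7, 0.84) * validity.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func nextRotationDeadline(notBefore, notAfter time.Time) time.Time {
	validity := notAfter.Sub(notBefore)
	jittered := time.Duration(float64(validity) * 0.7 * (1 + 0.2*rand.Float64()))
	return notBefore.Add(jittered)
}

func main() {
	notAfter := time.Date(2026, 2, 24, 5, 53, 3, 0, time.UTC) // expiry from the log
	notBefore := notAfter.Add(-365 * 24 * time.Hour)          // assumed one-year validity
	for i := 0; i < 3; i++ {
		// Each evaluation draws new randomness, matching the drifting log values.
		fmt.Println("rotation deadline:", nextRotationDeadline(notBefore, notAfter))
	}
}

Under these assumptions the window spans roughly early November to late December 2025, which matches the deadlines logged in this section.
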
Jan 23 06:55:45 crc kubenswrapper[5102]: I0123 06:55:45.600398 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:45 crc kubenswrapper[5102]: E0123 06:55:45.600763 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:45 crc kubenswrapper[5102]: I0123 06:55:45.600937 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:45 crc kubenswrapper[5102]: E0123 06:55:45.601045 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:45 crc kubenswrapper[5102]: I0123 06:55:45.601163 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:45 crc kubenswrapper[5102]: E0123 06:55:45.601271 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:45 crc kubenswrapper[5102]: I0123 06:55:45.601372 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:45 crc kubenswrapper[5102]: E0123 06:55:45.601470 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:45 crc kubenswrapper[5102]: I0123 06:55:45.662565 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-25 16:49:09.635631217 +0000 UTC
Jan 23 06:55:46 crc kubenswrapper[5102]: I0123 06:55:46.662908 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-25 16:50:50.284220315 +0000 UTC
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.598076 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.598132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.598132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.598262 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:47 crc kubenswrapper[5102]: E0123 06:55:47.598383 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:47 crc kubenswrapper[5102]: E0123 06:55:47.598472 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:47 crc kubenswrapper[5102]: E0123 06:55:47.598691 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:47 crc kubenswrapper[5102]: E0123 06:55:47.598783 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.641616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.641671 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.641688 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.641716 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.641735 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:47Z","lastTransitionTime":"2026-01-23T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.664026 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-28 20:56:04.385607554 +0000 UTC Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.744223 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.744263 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.744271 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.744283 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.744292 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:47Z","lastTransitionTime":"2026-01-23T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.847561 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.847603 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.847614 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.847630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.847641 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:47Z","lastTransitionTime":"2026-01-23T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.953439 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.954022 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.954157 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.954371 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:47 crc kubenswrapper[5102]: I0123 06:55:47.954458 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:47Z","lastTransitionTime":"2026-01-23T06:55:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.057349 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.057604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.057715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.057801 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.057896 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.160351 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.160413 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.160423 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.160438 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.160464 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.263145 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.263198 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.263209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.263229 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.263243 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.365991 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.366062 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.366073 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.366089 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.366101 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.468928 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.468965 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.468974 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.468990 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.469008 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.571603 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.571667 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.571686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.571712 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.571730 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.599781 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741"
Jan 23 06:55:48 crc kubenswrapper[5102]: E0123 06:55:48.600085 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.664837 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 17:01:12.875252573 +0000 UTC
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.674751 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.674935 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.674950 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.674970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.674983 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.778421 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.778496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.778514 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.778586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.778607 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.886198 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.886294 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.886354 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.886381 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.886406 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.989142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.989192 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.989208 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.989285 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:48 crc kubenswrapper[5102]: I0123 06:55:48.989306 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:48Z","lastTransitionTime":"2026-01-23T06:55:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.093202 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.093267 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.093280 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.093299 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.093314 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.198423 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.198496 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.198522 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.198586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.198614 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.303387 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.303484 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.303525 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.303602 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.303633 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.406813 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.406858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.406868 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.406883 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.406893 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.510078 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.510141 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.510160 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.510187 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.510204 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.597561 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.597717 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:49 crc kubenswrapper[5102]: E0123 06:55:49.597846 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.597937 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.597979 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:49 crc kubenswrapper[5102]: E0123 06:55:49.598186 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:49 crc kubenswrapper[5102]: E0123 06:55:49.598349 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:49 crc kubenswrapper[5102]: E0123 06:55:49.598516 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.613155 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.613290 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.613308 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.613328 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.613343 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.617949 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z"
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://23bbed8ca75a0347154d8f0cdcc81eabcc2d84695849ece9259eac388653a6b9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a49ab6c104152b3b1674847d8fd7f52dc54fcbf5cec9103e096681a2c6c27cb9\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.638084 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://441275b7211b74b6f8b166cb7b3b376f5948b706d6857511a3d5edca214d1025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.660914 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263
abdf480d4d6fe232e98b8741\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:33Z\\\",\\\"message\\\":\\\"getPort:{0 10259 },NodePort:0,AppProtocol:nil,},},Selector:map[string]string{scheduler: true,},ClusterIP:10.217.4.169,Type:ClusterIP,ExternalIPs:[],SessionAffinity:None,LoadBalancerIP:,LoadBalancerSourceRanges:[],ExternalName:,ExternalTrafficPolicy:,HealthCheckNodePort:0,PublishNotReadyAddresses:false,SessionAffinityConfig:nil,IPFamilyPolicy:*SingleStack,ClusterIPs:[10.217.4.169],IPFamilies:[IPv4],AllocateLoadBalancerNodePorts:nil,LoadBalancerClass:nil,InternalTrafficPolicy:*Cluster,TrafficDistribution:nil,},Status:ServiceStatus{LoadBalancer:LoadBalancerStatus{Ingress:[]LoadBalancerIngress{},},Conditions:[]Condition{},},}\\\\nI0123 06:55:33.074157 7130 ovn.go:134] Ensuring zone local for Pod openshift-multus/multus-additional-cni-plugins-fbvf7 in node crc\\\\nF0123 06:55:33.074002 7130 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-nod\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:55:31Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sjfbl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:33Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-cgkqt\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.665794 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-27 14:56:50.996507825 +0000 UTC Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.676396 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-bmrp4" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be881145-20bb-48fc-901b-d854e7bf15c5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://52b2e53b09052404d3f58cce271f989a59e0f3c0c5e3fd86344144015a68ac87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:37Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-xvkpn\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:36Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-bmrp4\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.693920 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"a7d383f6-0729-4590-8252-46e50ea8ece8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:47Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bfb85\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:47Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-rmkhl\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.716320 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.716379 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.716398 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.716423 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.716441 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.718948 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8114938b-8d31-45a0-bc30-c8cb8c3afedb\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://93fdc29cdb839af5b935b338ae61ba1b635f7407ede06c9a4f66d763474a1b9b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fb9043a2c85f166486a15453a9837f11fd5787826ed59bd98a5f6790dc25db9e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49e02fdd4a455463c1301481504f9ea29796cce5be5beae2938de8d6f92f330d\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.733196 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-pht4g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"7fdff815-7d6c-4f01-946d-bc444475aa15\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a88c6eb81274f1749d51c7cd1e63197d70feb81b67a2b958c501f200aae4461\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-gzsrk\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-pht4g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.745984 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" err="failed to patch status
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"04f943d6-91c5-4493-b310-de0b8ef7966e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://67a0180438b8ff5344c50fdf5436b481c493d5ad8ddb6bcac1dd801ea8f6ff80\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-kv7qq\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-vnmgh\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.756679 5102 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"567b4c93-a5a8-420f-b90e-958e09074832\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://929bcd1dfbc62f6074842c0ba59e90178e8e4d6bb3a5c987af2787636eba48bf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2c45de1b53fd917e70b0f9ec929cd3d11b363a0a13f065347200809c34d25b62\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.772661 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8232b710-e236-49a9-9bfa-82ab28c7203c\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"lling back to namespace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": net/http: TLS handshake timeout\\\\nI0123 06:54:21.956636 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI0123 06:54:21.958523 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-982497867/tls.crt::/tmp/serving-cert-982497867/tls.key\\\\\\\"\\\\nI0123 06:54:27.396959 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI0123 06:54:27.401012 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI0123 06:54:27.401043 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI0123 06:54:27.401083 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI0123 06:54:27.401088 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI0123 06:54:27.415110 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW0123 06:54:27.415140 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415147 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW0123 06:54:27.415151 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW0123 06:54:27.415155 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW0123 06:54:27.415160 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW0123 06:54:27.415163 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI0123 06:54:27.415356 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF0123 06:54:27.419813 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.785913 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.802622 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f73883244ce8f04ba20e08a8e4d720dad0adfbe1f9ea3e86084a4bfdc77cf4e3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819398 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819445 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819460 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819480 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819496 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.819729 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"b0672e4f-cd9f-47e6-8909-43e33fb9c254\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:42Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:43Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5275aa3d6a23983d05932d28ad963c1fb71fefb9651a852af97335bbf55ffb0a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:42Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f8362c35640acad61389691aa84b1519fc31e2147d3643af0d8c5731aef7d535\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9893ebb66cf16dd3b4a7fb2b1338198ec87933a7abe029efb9bb6d0555491a3b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:37Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:36Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9bcd1b3df6d15baa28695b2ee52fb37959c8424c05460a47dcaf7081fab79501\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:38Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://95a456e08e010c966845e627a0fb3a89b471a2b886003eb20b3212538e2687d7\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:39Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:39Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1868eb56da2ca0df63964158efbb9b6fa797986e95f1bb4670d4383f33570a98\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:40Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://1c2e349d87532ee95a45bf7cba24fa0cf65832539549f01babfefa6d98962c04\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:41Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-2tss9\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-fbvf7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.837999 5102 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"aa980860-3ea9-4b1f-ae8f-d9caed98900d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:46Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:48Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://b02fbb08903562453b64b658f94878b0e96076b7b37de09f51d279ca591a9e87\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://10feaa95a47db5d4bc8455d170a5e1af90ab6e11ebe2f017257b61656e7c682e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:47Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-bx5xl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:46Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-sndn2\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.853042 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"73e4f01a-ec65-44cc-b49e-0bb9145a4515\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:00Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://720b04c670afdd5e6f4b155682be7f45dc383b0bbf79153e6e82b67c52bb8b71\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://8ffd0d606509d01a6d9116b555084c704452cf1b8d5a77215f8c0fd1ce9ec944\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://49abc8bf17cb0b85da529e00a172d14d2eb72c5cd7c8ea68db6dc6f5cfc880b5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:54:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c3e2d71e11b11e88fb4c0970ffe212e164145aaab9d9460b378227cd785fa84a\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2026-01-23T06:54:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:10Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:09Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.870952 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.889488 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.906956 5102 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-5vv4l" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c1446a26-ae38-40f3-a313-8604f5e98285\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:33Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:54:32Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:23Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2026-01-23T06:55:21Z\\\",\\\"message\\\":\\\"2026-01-23T06:54:36+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4\\\\n2026-01-23T06:54:36+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_b341c7fb-a940-494b-8bb9-898ced2bc4e4 to /host/opt/cni/bin/\\\\n2026-01-23T06:54:36Z [verbose] multus-daemon started\\\\n2026-01-23T06:54:36Z [verbose] Readiness Indicator file check\\\\n2026-01-23T06:55:21Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2026-01-23T06:54:33Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2026-01-23T06:55:22Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-sfws4\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2026-01-23T06:54:32Z\\\"}}\" for pod \"openshift-multus\"/\"multus-5vv4l\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:49Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.922598 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.922653 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.922672 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.922695 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:49 crc kubenswrapper[5102]: I0123 06:55:49.922712 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:49Z","lastTransitionTime":"2026-01-23T06:55:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.025913 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.025993 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.026016 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.026047 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.026065 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.129045 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.129107 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.129126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.129149 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.129167 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.232294 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.232368 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.232395 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.232427 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.232450 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.335728 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.335800 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.335824 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.335852 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.335873 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.438889 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.438967 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.439001 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.439032 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.439053 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.541312 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.541385 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.541405 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.541428 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.541445 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.643613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.643654 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.643666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.643683 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.643692 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.666587 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-05 07:15:55.78551206 +0000 UTC
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.745586 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.745623 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.745631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.745643 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.745652 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.847914 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.847950 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.847966 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.847981 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.847993 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.950387 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.950450 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.950468 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.950486 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:50 crc kubenswrapper[5102]: I0123 06:55:50.950497 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:50Z","lastTransitionTime":"2026-01-23T06:55:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.054715 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.054833 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.054858 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.054892 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.054917 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.158353 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.158426 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.158449 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.158478 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.158503 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.262182 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.262261 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.262285 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.262313 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.262334 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.365912 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.365985 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.366003 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.366028 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.366131 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.469096 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.469158 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.469228 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.469256 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.469269 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.571866 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.571929 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.571952 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.571979 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.572000 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.597628 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.597695 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.597845 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.597890 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.597818 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.598320 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.598618 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.598761 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.618987 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"]
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.660901 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.661061 5102 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 23 06:55:51 crc kubenswrapper[5102]: E0123 06:55:51.661149 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs podName:a7d383f6-0729-4590-8252-46e50ea8ece8 nodeName:}" failed. No retries permitted until 2026-01-23 06:56:55.661125288 +0000 UTC m=+166.481474303 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs") pod "network-metrics-daemon-rmkhl" (UID: "a7d383f6-0729-4590-8252-46e50ea8ece8") : object "openshift-multus"/"metrics-daemon-secret" not registered
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.667696 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-21 15:56:36.573362271 +0000 UTC
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.674990 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.675028 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.675044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.675068 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.675086 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.778006 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.778091 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.778118 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.778151 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.778177 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.881938 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.882019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.882041 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.882071 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.882094 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.985332 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.985403 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.985420 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.985446 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:51 crc kubenswrapper[5102]: I0123 06:55:51.985511 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:51Z","lastTransitionTime":"2026-01-23T06:55:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.088818 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.088873 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.088884 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.088910 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.088921 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.191727 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.191764 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.191772 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.191785 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.191793 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.294337 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.294381 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.294390 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.294404 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.294412 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.396100 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.396162 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.396179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.396200 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.396221 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.499630 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.499686 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.499703 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.499728 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.499749 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.602368 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.602430 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.602450 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.602472 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.602489 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.668361 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-26 08:28:19.193899082 +0000 UTC
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.704529 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.704595 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.704622 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.704635 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.704646 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.807612 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.807672 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.807683 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.807702 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.807713 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.909917 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.909977 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.909993 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.910014 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:52 crc kubenswrapper[5102]: I0123 06:55:52.910030 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:52Z","lastTransitionTime":"2026-01-23T06:55:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.050483 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.050526 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.050572 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.050595 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.050612 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.118703 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.118770 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.118804 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.118837 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.118858 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.140837 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:53Z is after 2025-08-24T17:21:41Z"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.145720 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.145773 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.145786 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.145805 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.145822 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.165846 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:53Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.171963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.172046 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.172072 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.172101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.172169 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.194118 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:53Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.200517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.200603 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.200613 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.200636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.200647 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.213182 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:53Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.216371 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.216404 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
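
Every retry above fails the same way: the node.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 serves a certificate whose notAfter is 2025-08-24T17:21:41Z while the node clock reads 2026-01-23, so each status patch dies in the TLS handshake before the payload matters. A minimal Go sketch, not part of the log, that reads that certificate's validity window directly (the endpoint address is taken from the errors above; run it on the node itself):

```go
// certcheck prints the validity window of the certificate served at the
// webhook endpoint named in the kubelet errors above.
package main

import (
	"crypto/tls"
	"fmt"
	"log"
	"time"
)

func main() {
	// InsecureSkipVerify lets the handshake complete so the expired
	// certificate can be inspected rather than rejected outright.
	conn, err := tls.Dial("tcp", "127.0.0.1:9743", &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		log.Fatalf("dial webhook endpoint: %v", err)
	}
	defer conn.Close()

	for _, cert := range conn.ConnectionState().PeerCertificates {
		fmt.Printf("subject=%q notBefore=%s notAfter=%s expired=%v\n",
			cert.Subject.String(),
			cert.NotBefore.Format(time.RFC3339),
			cert.NotAfter.Format(time.RFC3339),
			time.Now().After(cert.NotAfter))
	}
}
```

With a leaf certificate that expired in August 2025 and a clock in January 2026, this prints expired=true, which is exactly the x509 failure each patch attempt reports.
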
event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.216412 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.216426 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.216435 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.227867 5102 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404556Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865356Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"lastTransitionTime\\\":\\\"2026-01-23T06:55:53Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"f39edb4b-853d-44ae-bcf5-b5b79110ef33\\\",\\\"systemUUID\\\":\\\"01d84193-5d13-4a9b-819a-5818b02f0043\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2026-01-23T06:55:53Z is after 2025-08-24T17:21:41Z" Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.227991 5102 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.229310 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
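
The webhook failure only blocks reporting; the NotReady condition itself comes from the container runtime finding no CNI configuration in /etc/kubernetes/cni/net.d/. A minimal sketch of that directory check, not part of the log (the path is from the message above; treating .conf, .conflist, and .json as config files follows the usual CNI loader convention):

```go
// cnicheck lists CNI network configs in the directory the kubelet message
// points at; an empty result corresponds to NetworkReady=false.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir := "/etc/kubernetes/cni/net.d"
	entries, err := os.ReadDir(dir)
	if err != nil {
		fmt.Printf("cannot read %s: %v\n", dir, err)
		return
	}
	var found []string
	for _, e := range entries {
		switch filepath.Ext(e.Name()) {
		case ".conf", ".conflist", ".json":
			found = append(found, filepath.Join(dir, e.Name()))
		}
	}
	if len(found) == 0 {
		fmt.Println("no CNI configuration file found; network plugin not ready")
		return
	}
	for _, f := range found {
		fmt.Println("CNI config:", f)
	}
}
```

Until the network provider writes a config into that directory, the Ready condition stays False and the pod sandboxes that follow cannot be created.
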
event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.229341 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.229349 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.229364 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.229373 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.332018 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.332076 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.332088 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.332105 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.332116 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.435364 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.435415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.435427 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.435445 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.435458 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.544156 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.544215 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.544232 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.544257 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.544276 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.597812 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.597914 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.597946 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.598134 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.598263 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.598273 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.598457 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:53 crc kubenswrapper[5102]: E0123 06:55:53.598692 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.647223 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.647286 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.647303 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.647325 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.647341 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.669111 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-11 00:06:34.397400879 +0000 UTC Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.749855 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.749925 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.749942 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.749969 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.749986 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.852890 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.852953 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.852975 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.852999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.853017 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.954912 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.954970 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.954982 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.954999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:53 crc kubenswrapper[5102]: I0123 06:55:53.955009 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:53Z","lastTransitionTime":"2026-01-23T06:55:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.057364 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.057400 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.057408 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.057421 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.057429 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.160530 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.160614 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.160626 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.160642 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.160654 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.263460 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.263567 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.263590 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.263615 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.263633 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.367573 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.367632 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.367645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.367664 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.367677 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.470363 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.470450 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.470472 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.470504 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.470525 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.573148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.573214 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.573236 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.573266 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.573287 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.670195 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-10 08:05:12.19674712 +0000 UTC
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.676604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.676669 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.676692 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.676718 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.676735 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.779494 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.779616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.779642 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.779673 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.779695 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.882154 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.882238 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.882255 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.882279 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.882294 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.985067 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.985134 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.985159 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.985188 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:54 crc kubenswrapper[5102]: I0123 06:55:54.985212 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:54Z","lastTransitionTime":"2026-01-23T06:55:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.088245 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.088290 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.088301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.088317 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.088330 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.191337 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.191383 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.191395 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.191410 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.191421 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.294566 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.294610 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.294621 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.294636 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.294648 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.396987 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.397035 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.397044 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.397059 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.397069 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.500575 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.500666 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.500691 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.500720 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.500740 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.597998 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:55 crc kubenswrapper[5102]: E0123 06:55:55.598200 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.598455 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:55 crc kubenswrapper[5102]: E0123 06:55:55.598598 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.598856 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.598962 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:55 crc kubenswrapper[5102]: E0123 06:55:55.599281 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:55 crc kubenswrapper[5102]: E0123 06:55:55.599434 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.604150 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.604199 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.604223 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.604254 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.604280 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.670388 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-22 06:25:58.302610025 +0000 UTC Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.707436 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.707516 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.707576 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.707604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.707621 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.810533 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.810645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.810670 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.810698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.810719 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.913798 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.913861 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.913878 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.913903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:55 crc kubenswrapper[5102]: I0123 06:55:55.913920 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:55Z","lastTransitionTime":"2026-01-23T06:55:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.017078 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.017135 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.017150 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.017172 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.017187 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.120081 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.120750 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.120774 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.120932 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.120994 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.224410 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.224453 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.224461 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.224476 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.224487 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.327148 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.327185 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.327193 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.327208 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.327216 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.431456 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.431517 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.431533 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.431607 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.431624 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.533660 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.533698 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.533709 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.533726 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.533738 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.636582 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.636665 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.636679 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.636702 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.636719 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.671109 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-15 06:57:59.706665587 +0000 UTC
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.740978 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.741040 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.741055 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.741075 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.741089 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.844024 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.844101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.844116 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.844146 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.844162 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.946845 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.946936 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.946963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.946995 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:56 crc kubenswrapper[5102]: I0123 06:55:56.947020 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:56Z","lastTransitionTime":"2026-01-23T06:55:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.050757 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.050820 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.050841 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.050867 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.050888 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.153775 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.153839 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.153853 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.153875 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.153887 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.257225 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.257285 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.257301 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.257327 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.257343 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.359570 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.359604 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.359615 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.359629 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.359640 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.462669 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.462721 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.462733 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.462762 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.462775 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.565333 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.565379 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.565387 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.565401 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.565411 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.598189 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:55:57 crc kubenswrapper[5102]: E0123 06:55:57.598358 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.598430 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.598465 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:55:57 crc kubenswrapper[5102]: E0123 06:55:57.598860 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:55:57 crc kubenswrapper[5102]: E0123 06:55:57.598916 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.599391 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:55:57 crc kubenswrapper[5102]: E0123 06:55:57.599797 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.668933 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.668989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.668999 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.669019 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.669037 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.671992 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-13 08:21:05.356593799 +0000 UTC
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.771696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.771735 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.771743 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.771756 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.771765 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.874926 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.874989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.875006 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.875034 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.875052 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.977162 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.977209 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.977220 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.977237 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:57 crc kubenswrapper[5102]: I0123 06:55:57.977249 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:57Z","lastTransitionTime":"2026-01-23T06:55:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.080431 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.080509 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.080528 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.080593 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.080610 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.183295 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.183377 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.183412 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.183443 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.183466 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.287265 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.287306 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.287314 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.287327 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.287335 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.389853 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.389895 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.389904 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.389919 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.389929 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Has your network provider started?"} Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.492847 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.492916 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.492934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.492958 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.492975 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.595682 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.595714 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.595723 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.595738 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.595749 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.672385 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 03:10:46.649867676 +0000 UTC Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.698466 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.698619 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.698651 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.698682 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.698710 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.801504 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.801620 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.801645 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.801674 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.801697 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.905575 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.905653 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.905673 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.905697 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:58 crc kubenswrapper[5102]: I0123 06:55:58.905714 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:58Z","lastTransitionTime":"2026-01-23T06:55:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.008356 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.008406 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.008414 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.008429 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.008437 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.112068 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.112108 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.112116 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.112131 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.112141 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.215164 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.215217 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.215231 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.215251 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.215265 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.318694 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.318747 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.318760 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.318777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.318790 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.421317 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.421382 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.421396 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.421415 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.421426 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.523973 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.524038 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.524050 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.524069 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.524080 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.597524 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:55:59 crc kubenswrapper[5102]: E0123 06:55:59.598031 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.597688 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.598289 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.597626 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:55:59 crc kubenswrapper[5102]: E0123 06:55:59.598418 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:55:59 crc kubenswrapper[5102]: E0123 06:55:59.598392 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:55:59 crc kubenswrapper[5102]: E0123 06:55:59.598607 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.625623 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=92.625604136 podStartE2EDuration="1m32.625604136s" podCreationTimestamp="2026-01-23 06:54:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.625159973 +0000 UTC m=+110.445508958" watchObservedRunningTime="2026-01-23 06:55:59.625604136 +0000 UTC m=+110.445953111" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.628570 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.628616 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.628631 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.628652 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.628663 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.636204 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-pht4g" podStartSLOduration=87.636175063 podStartE2EDuration="1m27.636175063s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.635851972 +0000 UTC m=+110.456200967" watchObservedRunningTime="2026-01-23 06:55:59.636175063 +0000 UTC m=+110.456524038" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.648598 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podStartSLOduration=87.648578646 podStartE2EDuration="1m27.648578646s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.646279855 +0000 UTC m=+110.466628830" watchObservedRunningTime="2026-01-23 06:55:59.648578646 +0000 UTC m=+110.468927621" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.671072 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=47.67104394 podStartE2EDuration="47.67104394s" podCreationTimestamp="2026-01-23 06:55:12 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.657869843 +0000 UTC m=+110.478218818" watchObservedRunningTime="2026-01-23 06:55:59.67104394 +0000 UTC m=+110.491392915" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.672610 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-16 21:17:18.137722149 +0000 UTC Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.687239 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=92.687214439 podStartE2EDuration="1m32.687214439s" podCreationTimestamp="2026-01-23 06:54:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.67332879 +0000 UTC m=+110.493677765" watchObservedRunningTime="2026-01-23 06:55:59.687214439 +0000 UTC m=+110.507563404" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.726938 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-fbvf7" podStartSLOduration=87.726911855 podStartE2EDuration="1m27.726911855s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.726175502 +0000 UTC m=+110.546524477" watchObservedRunningTime="2026-01-23 06:55:59.726911855 +0000 UTC m=+110.547260820" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.730943 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.730979 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 
crc kubenswrapper[5102]: I0123 06:55:59.730989 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.731005 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.731017 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.743889 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-sndn2" podStartSLOduration=87.743867588 podStartE2EDuration="1m27.743867588s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.743713354 +0000 UTC m=+110.564062329" watchObservedRunningTime="2026-01-23 06:55:59.743867588 +0000 UTC m=+110.564216573" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.757885 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=59.75786112 podStartE2EDuration="59.75786112s" podCreationTimestamp="2026-01-23 06:55:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.756702524 +0000 UTC m=+110.577051519" watchObservedRunningTime="2026-01-23 06:55:59.75786112 +0000 UTC m=+110.578210115" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.828591 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=8.828554534 podStartE2EDuration="8.828554534s" podCreationTimestamp="2026-01-23 06:55:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.827609804 +0000 UTC m=+110.647958799" watchObservedRunningTime="2026-01-23 06:55:59.828554534 +0000 UTC m=+110.648903509" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.829294 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-5vv4l" podStartSLOduration=87.829286435 podStartE2EDuration="1m27.829286435s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.802318583 +0000 UTC m=+110.622667558" watchObservedRunningTime="2026-01-23 06:55:59.829286435 +0000 UTC m=+110.649635410" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.833542 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.833583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.833595 5102 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.833609 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.833623 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.901733 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-bmrp4" podStartSLOduration=87.901708572 podStartE2EDuration="1m27.901708572s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:55:59.901366022 +0000 UTC m=+110.721715007" watchObservedRunningTime="2026-01-23 06:55:59.901708572 +0000 UTC m=+110.722057557" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.936366 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.936429 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.936441 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.936460 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:55:59 crc kubenswrapper[5102]: I0123 06:55:59.936472 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:55:59Z","lastTransitionTime":"2026-01-23T06:55:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.040357 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.040398 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.040410 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.040427 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.040437 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.144837 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.145312 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.145418 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.145518 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.145636 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.247827 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.247893 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.247907 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.247928 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.247942 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.351125 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.351169 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.351179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.351194 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.351203 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.453696 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.453745 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.453759 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.453777 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.453792 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.556877 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.556919 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.556934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.556955 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.556969 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.659654 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.659734 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.659746 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.659766 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.659779 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.673231 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-17 07:31:21.945422989 +0000 UTC Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.762826 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.762903 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.762934 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.762961 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.762983 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.866114 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.866162 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.866172 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.866186 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.866196 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.969157 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.969218 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.969240 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.969267 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:00 crc kubenswrapper[5102]: I0123 06:56:00.969290 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:00Z","lastTransitionTime":"2026-01-23T06:56:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.071583 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.071635 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.071648 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.071671 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.071685 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.173963 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.174045 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.174069 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.174126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.174143 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.277066 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.277126 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.277142 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.277165 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.277182 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.379204 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.379241 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.379250 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.379262 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.379273 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.481957 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.481987 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.481997 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.482011 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.482031 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.585179 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.585230 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.585246 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.585268 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.585285 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.598087 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.598116 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:01 crc kubenswrapper[5102]: E0123 06:56:01.598270 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.598334 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.598348 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:01 crc kubenswrapper[5102]: E0123 06:56:01.598471 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:56:01 crc kubenswrapper[5102]: E0123 06:56:01.598681 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:01 crc kubenswrapper[5102]: E0123 06:56:01.598855 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.674219 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-12-15 01:47:26.34573536 +0000 UTC Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.688200 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.688251 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.688264 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.688281 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.688292 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"}
Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.791378 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.791431 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.791445 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.791464 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:56:01 crc kubenswrapper[5102]: I0123 06:56:01.791476 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:01Z","lastTransitionTime":"2026-01-23T06:56:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:56:02 crc kubenswrapper[5102]: I0123 06:56:02.675406 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-19 06:42:55.585341742 +0000 UTC
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.249032 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.249089 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.249101 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.249118 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.249130 5102 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2026-01-23T06:56:03Z","lastTransitionTime":"2026-01-23T06:56:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"}
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.295903 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"]
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.296425 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
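The "Node became not ready" entries above carry a full v1 NodeCondition in their condition={...} payload. A minimal Go sketch of how such a Ready condition is assembled, assuming the k8s.io/api and k8s.io/apimachinery types; this is a simplified illustration, not the kubelet's actual setters.go code:

package main

import (
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// readyCondition builds a NodeReady condition; runtimeNetworkError is
// non-empty while the CNI config is missing, as in the log above.
func readyCondition(now time.Time, runtimeNetworkError string) v1.NodeCondition {
	cond := v1.NodeCondition{
		Type:               v1.NodeReady,
		Status:             v1.ConditionTrue,
		Reason:             "KubeletReady",
		Message:            "kubelet is posting ready status",
		LastHeartbeatTime:  metav1.NewTime(now),
		LastTransitionTime: metav1.NewTime(now),
	}
	if runtimeNetworkError != "" {
		cond.Status = v1.ConditionFalse
		cond.Reason = "KubeletNotReady"
		cond.Message = "container runtime network not ready: " + runtimeNetworkError
	}
	return cond
}

func main() {
	c := readyCondition(time.Now(), "NetworkReady=false reason:NetworkPluginNotReady")
	fmt.Printf("type=%s status=%s reason=%s\n", c.Type, c.Status, c.Reason)
}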
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.299341 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.299738 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.300053 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.300238 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.399096 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.399153 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81202198-5901-46ac-bd2a-78cd7b357d74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.399175 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.399203 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81202198-5901-46ac-bd2a-78cd7b357d74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.399259 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81202198-5901-46ac-bd2a-78cd7b357d74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500089 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500156 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81202198-5901-46ac-bd2a-78cd7b357d74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500177 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500196 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81202198-5901-46ac-bd2a-78cd7b357d74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500203 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500222 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81202198-5901-46ac-bd2a-78cd7b357d74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500380 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.500988 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/81202198-5901-46ac-bd2a-78cd7b357d74-service-ca\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.506364 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/81202198-5901-46ac-bd2a-78cd7b357d74-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.518634 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/81202198-5901-46ac-bd2a-78cd7b357d74-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-dnf5r\" (UID: \"81202198-5901-46ac-bd2a-78cd7b357d74\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
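The reconciler entries above show the kubelet's two-phase volume handling for the new pod: each volume is first verified as attached ("VerifyControllerAttachedVolume started"), then mounted ("MountVolume started" / "MountVolume.SetUp succeeded"). A rough sketch of that loop, with stand-in Volume and Mounter types rather than the kubelet's real operationExecutor interfaces:

package main

import "fmt"

type Volume struct{ UniqueName, Pod string }

type Mounter interface {
	SetUp(v Volume) error // performs the actual mount for one volume
}

type fakeMounter struct{}

func (fakeMounter) SetUp(v Volume) error { return nil }

// reconcile mirrors one pass of the desired-state loop: every volume the
// pod needs is verified as attached, then mounted; a failed mount is
// retried on the next pass rather than aborting the whole pod.
func reconcile(vols []Volume, m Mounter) {
	for _, v := range vols {
		fmt.Printf("VerifyControllerAttachedVolume started for %q\n", v.UniqueName)
	}
	for _, v := range vols {
		fmt.Printf("MountVolume started for %q\n", v.UniqueName)
		if err := m.SetUp(v); err != nil {
			fmt.Printf("MountVolume.SetUp failed for %q: %v (will retry)\n", v.UniqueName, err)
			continue
		}
		fmt.Printf("MountVolume.SetUp succeeded for %q\n", v.UniqueName)
	}
}

func main() {
	reconcile([]Volume{
		{UniqueName: "kubernetes.io/host-path/81202198-5901-46ac-bd2a-78cd7b357d74-etc-ssl-certs", Pod: "cluster-version-operator-5c965bbfc6-dnf5r"},
		{UniqueName: "kubernetes.io/secret/81202198-5901-46ac-bd2a-78cd7b357d74-serving-cert", Pod: "cluster-version-operator-5c965bbfc6-dnf5r"},
	}, fakeMounter{})
}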
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.597313 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.597358 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf"
Jan 23 06:56:03 crc kubenswrapper[5102]: E0123 06:56:03.597460 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.597228 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c"
Jan 23 06:56:03 crc kubenswrapper[5102]: E0123 06:56:03.597621 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.597642 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl"
Jan 23 06:56:03 crc kubenswrapper[5102]: E0123 06:56:03.598161 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8"
Jan 23 06:56:03 crc kubenswrapper[5102]: E0123 06:56:03.598288 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.598619 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741"
Jan 23 06:56:03 crc kubenswrapper[5102]: E0123 06:56:03.599462 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-cgkqt_openshift-ovn-kubernetes(9b926ddd-8c4e-41b8-87f9-aa35fb7af1da)\"" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.611741 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r"
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.676131 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2025-11-10 23:50:30.285796368 +0000 UTC
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.676206 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Rotating certificates
Jan 23 06:56:03 crc kubenswrapper[5102]: I0123 06:56:03.683175 5102 reflector.go:368] Caches populated for *v1.CertificateSigningRequest from k8s.io/client-go/tools/watch/informerwatcher.go:146
Jan 23 06:56:04 crc kubenswrapper[5102]: I0123 06:56:04.604081 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r" event={"ID":"81202198-5901-46ac-bd2a-78cd7b357d74","Type":"ContainerStarted","Data":"0376b9bb9ae280b4816158f3c6cf2286a62443bea05dea2553255c47dc32c6a3"}
Jan 23 06:56:04 crc kubenswrapper[5102]: I0123 06:56:04.604148 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r" event={"ID":"81202198-5901-46ac-bd2a-78cd7b357d74","Type":"ContainerStarted","Data":"06fe326b04536cc80d3efcadcf591080209476917ffb6e6c495c24308d60173e"}
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.616909 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/1.log"
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.617512 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/0.log"
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.617578 5102 generic.go:334] "Generic (PLEG): container finished" podID="c1446a26-ae38-40f3-a313-8604f5e98285" containerID="0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132" exitCode=1
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.617618 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerDied","Data":"0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132"}
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.617663 5102 scope.go:117] "RemoveContainer" containerID="006673ac61e7ea4c2e160315bc861808b84d1208254eb82b51bc882c909c54af"
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.618155 5102 scope.go:117] "RemoveContainer" containerID="0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132"
Jan 23 06:56:08 crc kubenswrapper[5102]: E0123 06:56:08.618364 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-5vv4l_openshift-multus(c1446a26-ae38-40f3-a313-8604f5e98285)\"" pod="openshift-multus/multus-5vv4l" podUID="c1446a26-ae38-40f3-a313-8604f5e98285"
Jan 23 06:56:08 crc kubenswrapper[5102]: I0123 06:56:08.639771 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-dnf5r" podStartSLOduration=96.639751266 podStartE2EDuration="1m36.639751266s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:04.624811682 +0000 UTC m=+115.445160657" watchObservedRunningTime="2026-01-23 06:56:08.639751266 +0000 UTC m=+119.460100241"
Jan 23 06:56:09 crc kubenswrapper[5102]: I0123 06:56:09.623672 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/1.log"
Jan 23 06:56:09 crc kubenswrapper[5102]: E0123 06:56:09.624397 5102 kubelet_node_status.go:497] "Node not becoming ready in time after startup"
Jan 23 06:56:09 crc kubenswrapper[5102]: E0123 06:56:09.703795 5102 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
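Every failure above traces back to the same readiness check: there is no CNI configuration file in /etc/kubernetes/cni/net.d/. A minimal sketch of such a check; illustrative, not ocicni's actual code:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cniReady reports whether confDir contains at least one CNI config file.
func cniReady(confDir string) (bool, error) {
	for _, ext := range []string{"*.conf", "*.conflist", "*.json"} {
		matches, err := filepath.Glob(filepath.Join(confDir, ext))
		if err != nil {
			return false, err
		}
		if len(matches) > 0 {
			return true, nil
		}
	}
	return false, nil
}

func main() {
	ready, err := cniReady("/etc/kubernetes/cni/net.d")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if !ready {
		fmt.Println("NetworkReady=false reason:NetworkPluginNotReady (no CNI configuration file)")
		return
	}
	fmt.Println("NetworkReady=true")
}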
pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:13 crc kubenswrapper[5102]: I0123 06:56:13.599067 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:13 crc kubenswrapper[5102]: I0123 06:56:13.599142 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:13 crc kubenswrapper[5102]: E0123 06:56:13.599329 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:56:13 crc kubenswrapper[5102]: E0123 06:56:13.599478 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:56:13 crc kubenswrapper[5102]: E0123 06:56:13.599613 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:56:14 crc kubenswrapper[5102]: E0123 06:56:14.705767 5102 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Jan 23 06:56:15 crc kubenswrapper[5102]: I0123 06:56:15.597953 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:15 crc kubenswrapper[5102]: I0123 06:56:15.597966 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:15 crc kubenswrapper[5102]: I0123 06:56:15.598260 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:15 crc kubenswrapper[5102]: E0123 06:56:15.598360 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:56:15 crc kubenswrapper[5102]: I0123 06:56:15.598609 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:15 crc kubenswrapper[5102]: E0123 06:56:15.598815 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:56:15 crc kubenswrapper[5102]: E0123 06:56:15.598939 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:56:15 crc kubenswrapper[5102]: E0123 06:56:15.599114 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:16 crc kubenswrapper[5102]: I0123 06:56:16.598305 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.598180 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.598256 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.598260 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:17 crc kubenswrapper[5102]: E0123 06:56:17.598435 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.598455 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:17 crc kubenswrapper[5102]: E0123 06:56:17.598610 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:56:17 crc kubenswrapper[5102]: E0123 06:56:17.598700 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:56:17 crc kubenswrapper[5102]: E0123 06:56:17.598748 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.654293 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/3.log" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.658239 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerStarted","Data":"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51"} Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.658822 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:56:17 crc kubenswrapper[5102]: I0123 06:56:17.699222 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podStartSLOduration=105.699196734 podStartE2EDuration="1m45.699196734s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:17.69875376 +0000 UTC m=+128.519102735" watchObservedRunningTime="2026-01-23 06:56:17.699196734 +0000 UTC m=+128.519545709" Jan 23 06:56:18 crc kubenswrapper[5102]: I0123 06:56:18.010098 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rmkhl"] Jan 23 06:56:18 crc kubenswrapper[5102]: I0123 06:56:18.010202 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:18 crc kubenswrapper[5102]: E0123 06:56:18.010316 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:19 crc kubenswrapper[5102]: I0123 06:56:19.597620 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:19 crc kubenswrapper[5102]: I0123 06:56:19.597719 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 06:56:19 crc kubenswrapper[5102]: E0123 06:56:19.706240 5102 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"
Jan 23 06:56:20 crc kubenswrapper[5102]: I0123 06:56:20.605744 5102 scope.go:117] "RemoveContainer" containerID="0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132"
Jan 23 06:56:21 crc kubenswrapper[5102]: I0123 06:56:21.673395 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/1.log"
Jan 23 06:56:21 crc kubenswrapper[5102]: I0123 06:56:21.673459 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerStarted","Data":"40aff2867ef29c03741072586843e76d98d67bbbacaf071da7e59ad200163102"}
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Jan 23 06:56:23 crc kubenswrapper[5102]: E0123 06:56:23.598365 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Jan 23 06:56:23 crc kubenswrapper[5102]: I0123 06:56:23.600716 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:23 crc kubenswrapper[5102]: I0123 06:56:23.600767 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:23 crc kubenswrapper[5102]: E0123 06:56:23.600844 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Jan 23 06:56:23 crc kubenswrapper[5102]: E0123 06:56:23.601102 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-rmkhl" podUID="a7d383f6-0729-4590-8252-46e50ea8ece8" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.598122 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.598125 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.598307 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.598314 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.601407 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.601847 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.601938 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.602097 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.602397 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 23 06:56:25 crc kubenswrapper[5102]: I0123 06:56:25.602402 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 23 06:56:33 crc kubenswrapper[5102]: I0123 06:56:33.950107 5102 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.020680 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zrhhc"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.021651 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.030616 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.030933 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.031126 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.031373 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.031664 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.031878 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.032103 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.032356 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.032701 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.032929 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.033626 5102 kubelet.go:2421] 
"SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5q97t"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.034237 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.035991 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.036671 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.038800 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.039123 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.039307 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.039514 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.039593 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.039954 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.040153 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.041077 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.041661 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.042109 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-52vtx"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.042471 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.044094 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.044159 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.055594 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.055733 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.057582 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.058109 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.058384 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.058741 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.060893 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ddjwb"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.061329 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.064617 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.064800 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.064915 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.065342 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.065487 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.065654 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.066681 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.066910 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.067074 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.067223 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.067362 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.067587 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.067751 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.068087 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.068244 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.071716 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.071779 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.071917 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.071962 5102 reflector.go:368] Caches 
populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.072045 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.072974 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-serving-cert\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073061 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-node-pullsecrets\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073102 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-serving-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073124 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-encryption-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073149 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-client\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073219 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7bx85\" (UniqueName: \"kubernetes.io/projected/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-kube-api-access-7bx85\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073253 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-image-import-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " 
pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073276 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit-dir\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073298 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073317 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.073509 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwvr2"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074170 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074182 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074273 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074449 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074467 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074499 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074592 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074682 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074691 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074772 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074804 5102 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074876 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074885 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074595 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074957 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074973 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074994 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.075063 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.074963 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.075081 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.075092 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.078220 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.078931 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.079412 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.079827 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.101524 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-n5pkw"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.119376 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.120600 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.120974 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-n5pkw" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121089 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121293 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121377 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121706 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121732 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121795 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121859 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121887 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.121970 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122025 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122040 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122126 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122154 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122339 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122356 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122458 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.122490 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.124831 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 23 
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.125307 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.125857 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.125995 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mhgdk"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.126747 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.127206 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.129445 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.131331 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.133446 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.133529 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.133717 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.137097 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.137311 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-shspc"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.138356 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-shspc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.138942 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.139493 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.139496 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"]
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.140595 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.143434 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.144462 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.144856 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.144963 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.145084 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.145154 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.145203 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.144868 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.145321 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.145466 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.146086 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.147836 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.148052 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.148311 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.149101 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.150898 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.152758 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.153395 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwhrv"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.153810 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.154147 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.154470 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-d2tlh"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.155113 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.157631 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ldkln"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.176454 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.177476 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.177605 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.178325 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.183703 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.198017 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.198387 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.198487 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-node-pullsecrets\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.198640 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-service-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.198642 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.199181 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vscws"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.199458 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.199715 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.199963 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-serving-cert\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.199994 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4nb5d\" (UniqueName: \"kubernetes.io/projected/38a43c31-7b58-4d32-8d88-66c5910a8207-kube-api-access-4nb5d\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200022 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200057 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-encryption-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200082 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vsdm6\" (UniqueName: \"kubernetes.io/projected/6ff670a5-d3e6-4fd5-97ad-2b07276283e8-kube-api-access-vsdm6\") pod \"downloads-7954f5f757-n5pkw\" (UID: \"6ff670a5-d3e6-4fd5-97ad-2b07276283e8\") " pod="openshift-console/downloads-7954f5f757-n5pkw" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200107 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200132 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-serving-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200155 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-serving-cert\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200178 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200204 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200229 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ccd8j\" (UniqueName: \"kubernetes.io/projected/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-kube-api-access-ccd8j\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200260 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200283 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-service-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200303 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200327 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dmw8l\" (UniqueName: \"kubernetes.io/projected/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-kube-api-access-dmw8l\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200355 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-client\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200380 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7p76p\" (UniqueName: 
\"kubernetes.io/projected/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-kube-api-access-7p76p\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200402 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-machine-approver-tls\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200446 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-config\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200476 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d15ae6c-628c-446e-b6d3-2a1f58983409-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200498 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200522 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-auth-proxy-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200546 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hmzzp\" (UniqueName: \"kubernetes.io/projected/bed003ce-1b2e-4e38-982d-aed7be7819c8-kube-api-access-hmzzp\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200608 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200630 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200650 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b86088e5-98cc-41c9-9c95-710ab08cbcf5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200674 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200698 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200717 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200741 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gxgf7\" (UniqueName: \"kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200764 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200785 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-policies\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200806 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: 
\"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-trusted-ca\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200828 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6wzmj\" (UniqueName: \"kubernetes.io/projected/431cc446-b71e-4e5f-8689-b93573a96e44-kube-api-access-6wzmj\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200852 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200891 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xxvk6\" (UniqueName: \"kubernetes.io/projected/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-kube-api-access-xxvk6\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200913 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200932 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-client\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200951 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-client\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200969 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-config\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200990 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: 
\"kubernetes.io/secret/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201011 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bed003ce-1b2e-4e38-982d-aed7be7819c8-serving-cert\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201040 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-config\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201060 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201082 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201104 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201123 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201148 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-dir\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201170 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: 
\"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-images\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201208 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201239 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201260 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201280 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-88h58\" (UniqueName: \"kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201304 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cd54m\" (UniqueName: \"kubernetes.io/projected/7d15ae6c-628c-446e-b6d3-2a1f58983409-kube-api-access-cd54m\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201332 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-encryption-config\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201352 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/431cc446-b71e-4e5f-8689-b93573a96e44-serving-cert\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201377 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7bx85\" (UniqueName: \"kubernetes.io/projected/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-kube-api-access-7bx85\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201395 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d15ae6c-628c-446e-b6d3-2a1f58983409-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201415 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b86088e5-98cc-41c9-9c95-710ab08cbcf5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201435 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-image-import-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201455 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hkqvw\" (UniqueName: \"kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201478 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit-dir\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201498 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201609 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201735 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xsv9v\" (UniqueName: \"kubernetes.io/projected/b86088e5-98cc-41c9-9c95-710ab08cbcf5-kube-api-access-xsv9v\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201759 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-config\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201778 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201797 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201819 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-serving-cert\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.201838 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.202027 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-node-pullsecrets\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.202887 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.202904 5102 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.200329 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.204991 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.205736 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-serving-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.206103 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.206220 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-audit-dir\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.207066 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-image-import-ca\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.209317 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.211331 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-trusted-ca-bundle\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.211756 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.212424 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.217872 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-serving-cert\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.217969 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-etcd-client\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.219368 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.220098 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-encryption-config\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.220394 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.224033 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.224245 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.224856 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.236660 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.237250 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.238479 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.238983 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.239372 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.239993 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.242874 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.243720 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.244238 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.244330 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.244917 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.246554 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5q97t"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.247683 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.248275 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.250776 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wdstp"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.251616 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.252315 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.253990 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.257168 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.259002 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zrhhc"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.260136 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-w2xbt"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.260918 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.262004 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.263156 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-52vtx"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.263617 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.264856 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.266085 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ddjwb"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.267417 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwvr2"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.268540 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.269815 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.270683 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mhgdk"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.272191 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-n5pkw"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.274880 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-w2xbt"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.275641 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwhrv"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.276512 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.277756 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.279118 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vscws"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.280464 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.281773 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.283023 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 
06:56:34.283863 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.284232 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-shspc"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.285784 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.288658 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.290167 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.291747 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-rfs77"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.293410 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.296336 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.300122 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ldkln"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.301732 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jcg67"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302248 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01ee0518-e113-4fbd-af00-cb2b6d43baef-trusted-ca\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302371 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bg46t\" (UniqueName: \"kubernetes.io/projected/825445e2-af9b-498b-afc5-3af404eeacf2-kube-api-access-bg46t\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302487 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ee5f921e-710b-4b7d-83b0-5e17137593a0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302637 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " 
pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302725 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-webhook-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302797 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-images\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302948 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.302956 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/40620ab8-3f8a-415b-aa67-ca9f813e51a7-metrics-tls\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303187 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303323 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303781 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2de5e0db-4684-4211-839d-c536e81a044f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303869 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cd54m\" (UniqueName: \"kubernetes.io/projected/7d15ae6c-628c-446e-b6d3-2a1f58983409-kube-api-access-cd54m\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303942 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb53c656-189e-4c35-94c5-f7ae81316c3c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: 
\"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.303965 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wc2ht\" (UniqueName: \"kubernetes.io/projected/fb53c656-189e-4c35-94c5-f7ae81316c3c-kube-api-access-wc2ht\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: \"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304010 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b86088e5-98cc-41c9-9c95-710ab08cbcf5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304028 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r8sj2\" (UniqueName: \"kubernetes.io/projected/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-kube-api-access-r8sj2\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304045 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6gsrg\" (UniqueName: \"kubernetes.io/projected/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-kube-api-access-6gsrg\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304084 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304099 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-config\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304116 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jc2xv\" (UniqueName: \"kubernetes.io/projected/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-kube-api-access-jc2xv\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304133 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwzb9\" (UniqueName: \"kubernetes.io/projected/90313bf5-831e-4837-9727-9fe8d2a823b4-kube-api-access-zwzb9\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: 
\"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304169 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2de5e0db-4684-4211-839d-c536e81a044f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304188 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304202 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304220 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304253 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304272 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4nb5d\" (UniqueName: \"kubernetes.io/projected/38a43c31-7b58-4d32-8d88-66c5910a8207-kube-api-access-4nb5d\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304292 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vsdm6\" (UniqueName: \"kubernetes.io/projected/6ff670a5-d3e6-4fd5-97ad-2b07276283e8-kube-api-access-vsdm6\") pod \"downloads-7954f5f757-n5pkw\" (UID: \"6ff670a5-d3e6-4fd5-97ad-2b07276283e8\") " pod="openshift-console/downloads-7954f5f757-n5pkw" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304330 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304346 5102 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304367 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-service-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304433 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.304957 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dmw8l\" (UniqueName: \"kubernetes.io/projected/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-kube-api-access-dmw8l\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305001 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-tmpfs\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305019 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-cabundle\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305037 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f3ef19c-f82b-444d-9133-364448e010c2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305079 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305097 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305118 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-config\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305289 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305369 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hmzzp\" (UniqueName: \"kubernetes.io/projected/bed003ce-1b2e-4e38-982d-aed7be7819c8-kube-api-access-hmzzp\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305414 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305434 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4f3ef19c-f82b-444d-9133-364448e010c2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305453 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d15ae6c-628c-446e-b6d3-2a1f58983409-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305470 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-auth-proxy-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305508 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2c2rc\" (UniqueName: \"kubernetes.io/projected/3a601a63-5329-456d-87f4-c9dc191e8589-kube-api-access-2c2rc\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305527 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305567 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305581 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-config\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.305646 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.306959 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-auth-proxy-config\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.308179 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.308219 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rfs77"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.308239 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh"] Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.308348 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: 
\"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.308702 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b86088e5-98cc-41c9-9c95-710ab08cbcf5-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.309477 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-config\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.309643 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.309736 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-trusted-ca\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.309818 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-default-certificate\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.309964 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310047 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310053 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310078 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310090 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-service-ca\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310099 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-client\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310128 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-client\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310150 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-config\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310241 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310414 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-config\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310497 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310547 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310637 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310657 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-prq8t\" (UniqueName: \"kubernetes.io/projected/deca5a30-509e-4519-a7f5-64944e4b7dd8-kube-api-access-prq8t\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310738 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310744 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bed003ce-1b2e-4e38-982d-aed7be7819c8-trusted-ca\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310763 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310822 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-dir\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310846 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqzmw\" (UniqueName: \"kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310866 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310935 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.310981 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-apiservice-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311012 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-images\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311029 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311074 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311101 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-88h58\" (UniqueName: \"kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311136 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-config\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311364 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.311971 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.312353 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-dir\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.313374 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.313956 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.313956 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-encryption-config\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314006 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee5f921e-710b-4b7d-83b0-5e17137593a0-config\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314041 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/431cc446-b71e-4e5f-8689-b93573a96e44-serving-cert\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314164 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-bound-sa-token\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314228 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d15ae6c-628c-446e-b6d3-2a1f58983409-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314294 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-etcd-client\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314333 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hkqvw\" (UniqueName: \"kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314370 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xsv9v\" (UniqueName: \"kubernetes.io/projected/b86088e5-98cc-41c9-9c95-710ab08cbcf5-kube-api-access-xsv9v\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314372 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314404 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314488 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-proxy-tls\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314578 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314638 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-key\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314751 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-service-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.314995 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7d15ae6c-628c-446e-b6d3-2a1f58983409-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315021 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315065 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-serving-cert\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315091 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315113 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315130 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4dhn\" (UniqueName: \"kubernetes.io/projected/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-kube-api-access-q4dhn\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315146 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/deca5a30-509e-4519-a7f5-64944e4b7dd8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315171 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-serving-cert\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315188 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-stats-auth\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315225 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315240 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ccd8j\" (UniqueName: \"kubernetes.io/projected/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-kube-api-access-ccd8j\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315260 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01ee0518-e113-4fbd-af00-cb2b6d43baef-metrics-tls\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315279 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a601a63-5329-456d-87f4-c9dc191e8589-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315278 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315297 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7p76p\" (UniqueName: \"kubernetes.io/projected/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-kube-api-access-7p76p\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315282 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-service-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315315 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-machine-approver-tls\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315357 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7g4bg\" (UniqueName: \"kubernetes.io/projected/40620ab8-3f8a-415b-aa67-ca9f813e51a7-kube-api-access-7g4bg\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315411 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kzl7l\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-kube-api-access-kzl7l\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315434 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a601a63-5329-456d-87f4-c9dc191e8589-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315482 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-srv-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315491 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315525 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315614 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315664 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b86088e5-98cc-41c9-9c95-710ab08cbcf5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315691 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee5f921e-710b-4b7d-83b0-5e17137593a0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315742 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gxgf7\" (UniqueName: \"kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315795 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-policies\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315827 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6wzmj\" (UniqueName: \"kubernetes.io/projected/431cc446-b71e-4e5f-8689-b93573a96e44-kube-api-access-6wzmj\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315850 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315880 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315894 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gglrn\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-kube-api-access-gglrn\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315924 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315974 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xxvk6\" (UniqueName: \"kubernetes.io/projected/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-kube-api-access-xxvk6\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316055 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/825445e2-af9b-498b-afc5-3af404eeacf2-service-ca-bundle\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316099 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-metrics-certs\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316123 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.315241 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-images\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316402 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316571 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6vbd9\" (UniqueName: \"kubernetes.io/projected/4f3ef19c-f82b-444d-9133-364448e010c2-kube-api-access-6vbd9\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316626 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bed003ce-1b2e-4e38-982d-aed7be7819c8-serving-cert\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316670 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316678 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-proxy-tls\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316792 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316794 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/431cc446-b71e-4e5f-8689-b93573a96e44-serving-cert\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.316921 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/38a43c31-7b58-4d32-8d88-66c5910a8207-audit-policies\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.317427 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-etcd-client\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.317597 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b86088e5-98cc-41c9-9c95-710ab08cbcf5-config\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.317658 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/431cc446-b71e-4e5f-8689-b93573a96e44-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.318111 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.318120 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.318836 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.319338 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-serving-cert\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.319468 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-machine-approver-tls\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.319835 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.319899 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320129 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320282 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7d15ae6c-628c-446e-b6d3-2a1f58983409-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320319 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320661 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/38a43c31-7b58-4d32-8d88-66c5910a8207-encryption-config\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320723 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bed003ce-1b2e-4e38-982d-aed7be7819c8-serving-cert\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.320765 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-config\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.321778 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-serving-cert\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.322001 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wdstp"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.323745 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.323779 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.325296 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.326311 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.327368 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.328657 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.329818 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jcg67"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.330846 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-57kr6"]
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.331474 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-57kr6"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.343298 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.364291 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.384664 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.404644 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418298 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jc2xv\" (UniqueName: \"kubernetes.io/projected/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-kube-api-access-jc2xv\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418339 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwzb9\" (UniqueName: \"kubernetes.io/projected/90313bf5-831e-4837-9727-9fe8d2a823b4-kube-api-access-zwzb9\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418363 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2de5e0db-4684-4211-839d-c536e81a044f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418402 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418427 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-tmpfs\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418444 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-cabundle\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418463 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f3ef19c-f82b-444d-9133-364448e010c2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418485 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418501 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418516 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4f3ef19c-f82b-444d-9133-364448e010c2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418563 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2c2rc\" (UniqueName: \"kubernetes.io/projected/3a601a63-5329-456d-87f4-c9dc191e8589-kube-api-access-2c2rc\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418610 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-plugins-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418634 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-default-certificate\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418669 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f9vgd\" (UniqueName: \"kubernetes.io/projected/3b3cdc03-9859-47b4-950a-3b01b64a11fb-kube-api-access-f9vgd\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418701 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418717 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418732 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-prq8t\" (UniqueName: \"kubernetes.io/projected/deca5a30-509e-4519-a7f5-64944e4b7dd8-kube-api-access-prq8t\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418748 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-socket-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418785 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqzmw\" (UniqueName: \"kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418802 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418818 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-apiservice-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418836 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418856 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee5f921e-710b-4b7d-83b0-5e17137593a0-config\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418883 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-bound-sa-token\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418909 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-proxy-tls\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418925 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-key\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418943 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/deca5a30-509e-4519-a7f5-64944e4b7dd8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418957 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418973 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4dhn\" (UniqueName: \"kubernetes.io/projected/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-kube-api-access-q4dhn\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.418996 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-stats-auth\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419010 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01ee0518-e113-4fbd-af00-cb2b6d43baef-metrics-tls\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419025 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a601a63-5329-456d-87f4-c9dc191e8589-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419053 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7g4bg\" (UniqueName: \"kubernetes.io/projected/40620ab8-3f8a-415b-aa67-ca9f813e51a7-kube-api-access-7g4bg\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419068 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kzl7l\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-kube-api-access-kzl7l\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419086 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a601a63-5329-456d-87f4-c9dc191e8589-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419108 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-srv-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419180 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee5f921e-710b-4b7d-83b0-5e17137593a0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419210 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419230 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gglrn\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-kube-api-access-gglrn\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419245 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/825445e2-af9b-498b-afc5-3af404eeacf2-service-ca-bundle\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419259 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-metrics-certs\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419252 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/4f3ef19c-f82b-444d-9133-364448e010c2-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419288 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-proxy-tls\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419306 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6vbd9\" (UniqueName: \"kubernetes.io/projected/4f3ef19c-f82b-444d-9133-364448e010c2-kube-api-access-6vbd9\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419340 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01ee0518-e113-4fbd-af00-cb2b6d43baef-trusted-ca\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419361 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-csi-data-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419388 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg46t\" (UniqueName: \"kubernetes.io/projected/825445e2-af9b-498b-afc5-3af404eeacf2-kube-api-access-bg46t\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419414 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ee5f921e-710b-4b7d-83b0-5e17137593a0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419440 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-images\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419486 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-webhook-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419566 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/40620ab8-3f8a-415b-aa67-ca9f813e51a7-metrics-tls\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419592 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2de5e0db-4684-4211-839d-c536e81a044f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419623 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb53c656-189e-4c35-94c5-f7ae81316c3c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: \"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wc2ht\" (UniqueName: \"kubernetes.io/projected/fb53c656-189e-4c35-94c5-f7ae81316c3c-kube-api-access-wc2ht\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: \"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419661 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-mountpoint-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419677 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-registration-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419697 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6gsrg\" (UniqueName: \"kubernetes.io/projected/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-kube-api-access-6gsrg\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419717 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r8sj2\" (UniqueName: \"kubernetes.io/projected/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-kube-api-access-r8sj2\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.419927 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-auth-proxy-config\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.420732 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") "
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.420816 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a601a63-5329-456d-87f4-c9dc191e8589-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.421213 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-images\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.421901 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-tmpfs\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.423026 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.423310 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-proxy-tls\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.424070 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.424295 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/40620ab8-3f8a-415b-aa67-ca9f813e51a7-metrics-tls\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.425160 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a601a63-5329-456d-87f4-c9dc191e8589-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.425987 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-proxy-tls\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: 
\"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.448477 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.450438 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.463697 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.483893 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.505417 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.512704 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/4f3ef19c-f82b-444d-9133-364448e010c2-serving-cert\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520462 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f9vgd\" (UniqueName: \"kubernetes.io/projected/3b3cdc03-9859-47b4-950a-3b01b64a11fb-kube-api-access-f9vgd\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520543 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-socket-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520799 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-socket-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520815 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-csi-data-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520931 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-csi-data-dir\") pod \"csi-hostpathplugin-jcg67\" 
(UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520950 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-mountpoint-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520974 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-registration-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.520978 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-mountpoint-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.521098 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-plugins-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.521126 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-plugins-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.521100 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/3b3cdc03-9859-47b4-950a-3b01b64a11fb-registration-dir\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.523647 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.544300 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.564430 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.583972 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.593219 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-default-certificate\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 
crc kubenswrapper[5102]: I0123 06:56:34.604343 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.613944 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-stats-auth\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.625028 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.633382 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/825445e2-af9b-498b-afc5-3af404eeacf2-metrics-certs\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.644681 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.651765 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/825445e2-af9b-498b-afc5-3af404eeacf2-service-ca-bundle\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.664250 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.684099 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.703803 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.724925 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.733735 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/fb53c656-189e-4c35-94c5-f7ae81316c3c-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: \"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.745648 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.765534 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.785618 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 
06:56:34.794117 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/deca5a30-509e-4519-a7f5-64944e4b7dd8-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.805421 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.815102 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-apiservice-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.815750 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-webhook-cert\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.825539 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.853130 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.863369 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2de5e0db-4684-4211-839d-c536e81a044f-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.864716 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.885663 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.895113 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-key\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.905816 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.909968 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-signing-cabundle\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.925134 5102 reflector.go:368] Caches 
populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.945470 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.964691 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.974754 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/2de5e0db-4684-4211-839d-c536e81a044f-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" Jan 23 06:56:34 crc kubenswrapper[5102]: I0123 06:56:34.985309 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.004918 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.024702 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.034813 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ee5f921e-710b-4b7d-83b0-5e17137593a0-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.065307 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.065992 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7bx85\" (UniqueName: \"kubernetes.io/projected/6cfe0251-9d72-45fb-9df0-1f58d2ed6002-kube-api-access-7bx85\") pod \"apiserver-76f77b778f-zrhhc\" (UID: \"6cfe0251-9d72-45fb-9df0-1f58d2ed6002\") " pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.072173 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ee5f921e-710b-4b7d-83b0-5e17137593a0-config\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.085093 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.095398 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-srv-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" 
(UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.105142 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.116153 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/90313bf5-831e-4837-9727-9fe8d2a823b4-profile-collector-cert\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.125745 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.145728 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.165242 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.185992 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.195147 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/01ee0518-e113-4fbd-af00-cb2b6d43baef-metrics-tls\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.221467 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.226757 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.233170 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/01ee0518-e113-4fbd-af00-cb2b6d43baef-trusted-ca\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.242683 5102 request.go:700] Waited for 1.017486505s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-scheduler-operator/secrets?fieldSelector=metadata.name%3Dopenshift-kube-scheduler-operator-dockercfg-qt55r&limit=500&resourceVersion=0 Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.250607 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.264661 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.270757 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.278937 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.284800 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.291094 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.305269 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.325343 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.346427 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.369010 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.384660 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.414545 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.423971 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.444657 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.462349 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-zrhhc"] Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.464676 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.484625 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.504748 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.524398 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 23 
06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.545498 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.565458 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.584542 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.604296 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.624357 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.644431 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.644580 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 23 06:56:35 crc kubenswrapper[5102]: E0123 06:56:35.644709 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:58:37.644679292 +0000 UTC m=+268.465028287 (durationBeforeRetry 2m2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.644900 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.644949 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.645146 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.645265 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.646125 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.650381 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.650624 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.651509 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.664492 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.684714 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.704366 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.724009 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.738384 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" event={"ID":"6cfe0251-9d72-45fb-9df0-1f58d2ed6002","Type":"ContainerStarted","Data":"37298cbcdabbb302e646a73d79700c52f715a0c1b63a231a7ffe3f4b2d1fe032"} Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.749197 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.764385 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.784969 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.804448 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.824265 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.835165 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.844094 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.850022 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.863190 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.864796 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.884182 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.904783 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.927239 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.945156 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.964238 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 23 06:56:35 crc kubenswrapper[5102]: I0123 06:56:35.988083 5102 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.007933 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.025592 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 23 06:56:36 crc kubenswrapper[5102]: W0123 06:56:36.030854 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5fe485a1_e14f_4c09_b5b9_f252bc42b7e8.slice/crio-24436c0c49d5e4e1c4826e4f256a8f411d04df6a06d6e66214a65392f014aeee WatchSource:0}: Error finding container 24436c0c49d5e4e1c4826e4f256a8f411d04df6a06d6e66214a65392f014aeee: Status 404 returned error can't find the container with id 24436c0c49d5e4e1c4826e4f256a8f411d04df6a06d6e66214a65392f014aeee Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.062054 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cd54m\" (UniqueName: \"kubernetes.io/projected/7d15ae6c-628c-446e-b6d3-2a1f58983409-kube-api-access-cd54m\") pod \"openshift-controller-manager-operator-756b6f6bc6-nr8q6\" (UID: \"7d15ae6c-628c-446e-b6d3-2a1f58983409\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.080673 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4nb5d\" (UniqueName: \"kubernetes.io/projected/38a43c31-7b58-4d32-8d88-66c5910a8207-kube-api-access-4nb5d\") pod \"apiserver-7bbb656c7d-4rpzc\" (UID: \"38a43c31-7b58-4d32-8d88-66c5910a8207\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.090857 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.110286 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hmzzp\" (UniqueName: \"kubernetes.io/projected/bed003ce-1b2e-4e38-982d-aed7be7819c8-kube-api-access-hmzzp\") pod \"console-operator-58897d9998-ddjwb\" (UID: \"bed003ce-1b2e-4e38-982d-aed7be7819c8\") " pod="openshift-console-operator/console-operator-58897d9998-ddjwb" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.140694 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vsdm6\" (UniqueName: \"kubernetes.io/projected/6ff670a5-d3e6-4fd5-97ad-2b07276283e8-kube-api-access-vsdm6\") pod \"downloads-7954f5f757-n5pkw\" (UID: \"6ff670a5-d3e6-4fd5-97ad-2b07276283e8\") " pod="openshift-console/downloads-7954f5f757-n5pkw" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.143364 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dmw8l\" (UniqueName: \"kubernetes.io/projected/f4521e45-5ad8-4088-ac0c-86ec8d1149a2-kube-api-access-dmw8l\") pod \"machine-approver-56656f9798-jbjds\" (UID: \"f4521e45-5ad8-4088-ac0c-86ec8d1149a2\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.182445 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-88h58\" (UniqueName: \"kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58\") pod \"controller-manager-879f6c89f-wqtx6\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") " pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.201407 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hkqvw\" (UniqueName: \"kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw\") pod \"oauth-openshift-558db77b4-cwvr2\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.222611 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xsv9v\" (UniqueName: \"kubernetes.io/projected/b86088e5-98cc-41c9-9c95-710ab08cbcf5-kube-api-access-xsv9v\") pod \"openshift-apiserver-operator-796bbdcf4f-fd24q\" (UID: \"b86088e5-98cc-41c9-9c95-710ab08cbcf5\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.243174 5102 request.go:700] Waited for 1.926350164s due to client-side throttling, not priority and fairness, request: POST:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-route-controller-manager/serviceaccounts/route-controller-manager-sa/token Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.246685 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7p76p\" (UniqueName: \"kubernetes.io/projected/c0c425b8-c2d0-4f99-b237-a8f18db7e8ea-kube-api-access-7p76p\") pod \"etcd-operator-b45778765-mhgdk\" (UID: \"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea\") " pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.251334 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.265867 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gxgf7\" (UniqueName: \"kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7\") pod \"route-controller-manager-6576b87f9c-lttd5\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.302019 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.303534 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6wzmj\" (UniqueName: \"kubernetes.io/projected/431cc446-b71e-4e5f-8689-b93573a96e44-kube-api-access-6wzmj\") pod \"authentication-operator-69f744f599-52vtx\" (UID: \"431cc446-b71e-4e5f-8689-b93573a96e44\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:36 crc kubenswrapper[5102]: W0123 06:56:36.305584 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d751cbb_f2e2_430d_9754_c882a5e924a5.slice/crio-103e523630221034ac162b2549ab72c4c15535d454e08c68c74b58cd7e4e3fc9 WatchSource:0}: Error finding container 103e523630221034ac162b2549ab72c4c15535d454e08c68c74b58cd7e4e3fc9: Status 404 returned error can't find the container with id 103e523630221034ac162b2549ab72c4c15535d454e08c68c74b58cd7e4e3fc9
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.310267 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ccd8j\" (UniqueName: \"kubernetes.io/projected/7cf0a44b-e9bc-42da-8883-eb6c9a58f37e-kube-api-access-ccd8j\") pod \"machine-api-operator-5694c8668f-5q97t\" (UID: \"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:36 crc kubenswrapper[5102]: W0123 06:56:36.320437 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-bace28bd85cbbe8223e87282394da45b8ece18a83207381db575f96e96dc56e4 WatchSource:0}: Error finding container bace28bd85cbbe8223e87282394da45b8ece18a83207381db575f96e96dc56e4: Status 404 returned error can't find the container with id bace28bd85cbbe8223e87282394da45b8ece18a83207381db575f96e96dc56e4
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.320461 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xxvk6\" (UniqueName: \"kubernetes.io/projected/b5d34f41-83c0-4ad5-a95a-977fbb5dd623-kube-api-access-xxvk6\") pod \"cluster-samples-operator-665b6dd947-7kv56\" (UID: \"b5d34f41-83c0-4ad5-a95a-977fbb5dd623\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.320816 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.327035 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.344048 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.345428 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.356966 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.365722 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.367875 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.374970 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.383831 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.402188 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-n5pkw"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.426506 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwzb9\" (UniqueName: \"kubernetes.io/projected/90313bf5-831e-4837-9727-9fe8d2a823b4-kube-api-access-zwzb9\") pod \"olm-operator-6b444d44fb-v9rnf\" (UID: \"90313bf5-831e-4837-9727-9fe8d2a823b4\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.445908 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jc2xv\" (UniqueName: \"kubernetes.io/projected/cbba35c4-c6a7-44c7-9da3-acc7aad55b6a-kube-api-access-jc2xv\") pod \"machine-config-controller-84d6567774-bj6m5\" (UID: \"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.449922 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.460251 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.484823 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-bound-sa-token\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.485603 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.501266 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.549851 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.556920 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gglrn\" (UniqueName: \"kubernetes.io/projected/2de5e0db-4684-4211-839d-c536e81a044f-kube-api-access-gglrn\") pod \"cluster-image-registry-operator-dc59b4c8b-7n6qf\" (UID: \"2de5e0db-4684-4211-839d-c536e81a044f\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.564615 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.568540 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/32a6a23b-e2d2-48fc-81ea-ca2ae68f4245-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-g4kjg\" (UID: \"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.568955 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q4dhn\" (UniqueName: \"kubernetes.io/projected/0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7-kube-api-access-q4dhn\") pod \"packageserver-d55dfcdfc-2tts4\" (UID: \"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.569202 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2c2rc\" (UniqueName: \"kubernetes.io/projected/3a601a63-5329-456d-87f4-c9dc191e8589-kube-api-access-2c2rc\") pod \"kube-storage-version-migrator-operator-b67b599dd-485n8\" (UID: \"3a601a63-5329-456d-87f4-c9dc191e8589\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.578206 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.647441 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.657013 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-prq8t\" (UniqueName: \"kubernetes.io/projected/deca5a30-509e-4519-a7f5-64944e4b7dd8-kube-api-access-prq8t\") pod \"multus-admission-controller-857f4d67dd-ldkln\" (UID: \"deca5a30-509e-4519-a7f5-64944e4b7dd8\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.672513 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ee5f921e-710b-4b7d-83b0-5e17137593a0-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-8qbr5\" (UID: \"ee5f921e-710b-4b7d-83b0-5e17137593a0\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.674084 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7g4bg\" (UniqueName: \"kubernetes.io/projected/40620ab8-3f8a-415b-aa67-ca9f813e51a7-kube-api-access-7g4bg\") pod \"dns-operator-744455d44c-shspc\" (UID: \"40620ab8-3f8a-415b-aa67-ca9f813e51a7\") " pod="openshift-dns-operator/dns-operator-744455d44c-shspc"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.676753 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg46t\" (UniqueName: \"kubernetes.io/projected/825445e2-af9b-498b-afc5-3af404eeacf2-kube-api-access-bg46t\") pod \"router-default-5444994796-d2tlh\" (UID: \"825445e2-af9b-498b-afc5-3af404eeacf2\") " pod="openshift-ingress/router-default-5444994796-d2tlh"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.677648 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6vbd9\" (UniqueName: \"kubernetes.io/projected/4f3ef19c-f82b-444d-9133-364448e010c2-kube-api-access-6vbd9\") pod \"openshift-config-operator-7777fb866f-zvcbn\" (UID: \"4f3ef19c-f82b-444d-9133-364448e010c2\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.700982 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6gsrg\" (UniqueName: \"kubernetes.io/projected/d1d9d0ca-c225-4b42-8c2a-0264deb35d71-kube-api-access-6gsrg\") pod \"machine-config-operator-74547568cd-b4t7z\" (UID: \"d1d9d0ca-c225-4b42-8c2a-0264deb35d71\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.701076 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kzl7l\" (UniqueName: \"kubernetes.io/projected/01ee0518-e113-4fbd-af00-cb2b6d43baef-kube-api-access-kzl7l\") pod \"ingress-operator-5b745b69d9-cxvms\" (UID: \"01ee0518-e113-4fbd-af00-cb2b6d43baef\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.722439 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqzmw\" (UniqueName: \"kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw\") pod \"marketplace-operator-79b997595-bwhrv\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.748521 5102 generic.go:334] "Generic (PLEG): container finished" podID="6cfe0251-9d72-45fb-9df0-1f58d2ed6002" containerID="e9cc20265e70c8497128bf6c7784d07a5514de86eda0e643b87e71ddafbec171" exitCode=0 Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.748797 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" event={"ID":"6cfe0251-9d72-45fb-9df0-1f58d2ed6002","Type":"ContainerDied","Data":"e9cc20265e70c8497128bf6c7784d07a5514de86eda0e643b87e71ddafbec171"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.752435 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"a0fb800311c1cb6efe9891ae634e83877bfcf85ab1d707ec99ce570786db6494"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.752462 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"103e523630221034ac162b2549ab72c4c15535d454e08c68c74b58cd7e4e3fc9"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.754054 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" event={"ID":"f4521e45-5ad8-4088-ac0c-86ec8d1149a2","Type":"ContainerStarted","Data":"1badc6e7cc62518ad85256ffa7ef7694ff4d47a0763394221c987ce206e824ed"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.755369 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"ff288b61766869013fac5b6008d14627318ca50ba172d2b1f610b436df1aaa4c"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.755391 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"bace28bd85cbbe8223e87282394da45b8ece18a83207381db575f96e96dc56e4"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.755702 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.757285 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"46f27eb5447b0e1862e8ccca8a129a695a3526f71f21ee94a3d3f10af521f30c"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.792982 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.793760 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.794463 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-shspc" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.794875 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.795223 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"24436c0c49d5e4e1c4826e4f256a8f411d04df6a06d6e66214a65392f014aeee"} Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.799635 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r8sj2\" (UniqueName: \"kubernetes.io/projected/ac8be70a-9f33-4b73-a175-9c5c0dd3f262-kube-api-access-r8sj2\") pod \"service-ca-9c57cc56f-vscws\" (UID: \"ac8be70a-9f33-4b73-a175-9c5c0dd3f262\") " pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.810907 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wc2ht\" (UniqueName: \"kubernetes.io/projected/fb53c656-189e-4c35-94c5-f7ae81316c3c-kube-api-access-wc2ht\") pod \"package-server-manager-789f6589d5-8wlt9\" (UID: \"fb53c656-189e-4c35-94c5-f7ae81316c3c\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.814378 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.814714 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.819920 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.861362 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.862472 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.864054 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-vscws" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.882263 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.886067 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.888167 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-52vtx"] Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.890447 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f9vgd\" (UniqueName: \"kubernetes.io/projected/3b3cdc03-9859-47b4-950a-3b01b64a11fb-kube-api-access-f9vgd\") pod \"csi-hostpathplugin-jcg67\" (UID: \"3b3cdc03-9859-47b4-950a-3b01b64a11fb\") " pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918620 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918684 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b3113af-a4c8-498d-b357-21038e8ff69e-metrics-tls\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918710 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918731 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-profile-collector-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918748 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8lfr\" (UniqueName: \"kubernetes.io/projected/2b3113af-a4c8-498d-b357-21038e8ff69e-kube-api-access-f8lfr\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918805 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3113af-a4c8-498d-b357-21038e8ff69e-config-volume\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918906 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " 
pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.918943 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919003 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919024 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8n9kp\" (UniqueName: \"kubernetes.io/projected/b1dfbc89-0a35-44dc-968d-ff63a407d71e-kube-api-access-8n9kp\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919054 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919075 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919096 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-config\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919126 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919160 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kddkv\" (UniqueName: \"kubernetes.io/projected/5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe-kube-api-access-kddkv\") pod \"migrator-59844c95c7-jlcpp\" (UID: \"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe\") " 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919236 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-certs\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919399 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1dfbc89-0a35-44dc-968d-ff63a407d71e-serving-cert\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919480 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919509 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-srv-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919539 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hcm89\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919671 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-node-bootstrap-token\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919692 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sr5wf\" (UniqueName: \"kubernetes.io/projected/9f9d33f3-df0d-4588-957d-6b7e3646eef4-kube-api-access-sr5wf\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919723 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z7r4z\" (UniqueName: \"kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc 
kubenswrapper[5102]: I0123 06:56:36.919742 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dfbc89-0a35-44dc-968d-ff63a407d71e-config\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919761 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919888 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.919950 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.920024 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931317 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c4be94-985a-4c36-bb76-9dc6cdb0da17-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931421 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931678 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f9d33f3-df0d-4588-957d-6b7e3646eef4-cert\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931718 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tprdj\" (UniqueName: \"kubernetes.io/projected/5b9f87f2-6e86-4a80-9725-8d94129a946a-kube-api-access-tprdj\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931773 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931916 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.931975 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk69m\" (UniqueName: \"kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.932037 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.932055 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tpwxs\" (UniqueName: \"kubernetes.io/projected/e00eab55-a637-4e32-800b-b4703b747bc2-kube-api-access-tpwxs\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.932097 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s75l6\" (UniqueName: \"kubernetes.io/projected/49c4be94-985a-4c36-bb76-9dc6cdb0da17-kube-api-access-s75l6\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:36 crc kubenswrapper[5102]: E0123 06:56:36.936495 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:37.436474095 +0000 UTC m=+148.256823240 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:36 crc kubenswrapper[5102]: I0123 06:56:36.996623 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.014492 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc"] Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.034285 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.034504 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:37.534466598 +0000 UTC m=+148.354815573 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.035351 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f9d33f3-df0d-4588-957d-6b7e3646eef4-cert\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.035514 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tprdj\" (UniqueName: \"kubernetes.io/projected/5b9f87f2-6e86-4a80-9725-8d94129a946a-kube-api-access-tprdj\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.035643 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.035799 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: 
\"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036292 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk69m\" (UniqueName: \"kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036416 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036542 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tpwxs\" (UniqueName: \"kubernetes.io/projected/e00eab55-a637-4e32-800b-b4703b747bc2-kube-api-access-tpwxs\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036710 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s75l6\" (UniqueName: \"kubernetes.io/projected/49c4be94-985a-4c36-bb76-9dc6cdb0da17-kube-api-access-s75l6\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036851 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.036948 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b3113af-a4c8-498d-b357-21038e8ff69e-metrics-tls\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037047 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037140 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-profile-collector-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037233 5102 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8lfr\" (UniqueName: \"kubernetes.io/projected/2b3113af-a4c8-498d-b357-21038e8ff69e-kube-api-access-f8lfr\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037324 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3113af-a4c8-498d-b357-21038e8ff69e-config-volume\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037437 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037544 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037684 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037792 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8n9kp\" (UniqueName: \"kubernetes.io/projected/b1dfbc89-0a35-44dc-968d-ff63a407d71e-kube-api-access-8n9kp\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037879 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.037972 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038068 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-config\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " 
pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038169 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038290 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kddkv\" (UniqueName: \"kubernetes.io/projected/5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe-kube-api-access-kddkv\") pod \"migrator-59844c95c7-jlcpp\" (UID: \"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038424 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-certs\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038552 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1dfbc89-0a35-44dc-968d-ff63a407d71e-serving-cert\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038693 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038855 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-srv-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038966 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hcm89\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039131 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-node-bootstrap-token\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039272 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-z7r4z\" (UniqueName: \"kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039384 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sr5wf\" (UniqueName: \"kubernetes.io/projected/9f9d33f3-df0d-4588-957d-6b7e3646eef4-kube-api-access-sr5wf\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039512 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dfbc89-0a35-44dc-968d-ff63a407d71e-config\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039828 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039944 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040064 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040177 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040303 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c4be94-985a-4c36-bb76-9dc6cdb0da17-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038774 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " 
pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040465 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/9f9d33f3-df0d-4588-957d-6b7e3646eef4-cert\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.039874 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-config\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040644 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040675 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.040875 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.038229 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.041146 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b1dfbc89-0a35-44dc-968d-ff63a407d71e-config\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.042323 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.042903 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert\") pod \"console-f9d7485db-k7ghh\" 
(UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.044026 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:37.544009954 +0000 UTC m=+148.364358929 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.044764 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/2b3113af-a4c8-498d-b357-21038e8ff69e-config-volume\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.045456 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.045500 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.045584 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.045831 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.046090 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-profile-collector-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.046345 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: 
\"kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.046789 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/2b3113af-a4c8-498d-b357-21038e8ff69e-metrics-tls\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.046811 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/b1dfbc89-0a35-44dc-968d-ff63a407d71e-serving-cert\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.046886 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/e00eab55-a637-4e32-800b-b4703b747bc2-srv-cert\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.047194 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.047358 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-node-bootstrap-token\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.049514 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.049691 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5b9f87f2-6e86-4a80-9725-8d94129a946a-certs\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.050021 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/49c4be94-985a-4c36-bb76-9dc6cdb0da17-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.050108 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.086026 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6"] Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.087342 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tprdj\" (UniqueName: \"kubernetes.io/projected/5b9f87f2-6e86-4a80-9725-8d94129a946a-kube-api-access-tprdj\") pod \"machine-config-server-57kr6\" (UID: \"5b9f87f2-6e86-4a80-9725-8d94129a946a\") " pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: W0123 06:56:37.092720 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod825445e2_af9b_498b_afc5_3af404eeacf2.slice/crio-10e17f2e10824302150e0a73331ac82dbca3ceed3a0eecf4be729db5a809c877 WatchSource:0}: Error finding container 10e17f2e10824302150e0a73331ac82dbca3ceed3a0eecf4be729db5a809c877: Status 404 returned error can't find the container with id 10e17f2e10824302150e0a73331ac82dbca3ceed3a0eecf4be729db5a809c877 Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.112397 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.257247 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.257875 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:37.757846101 +0000 UTC m=+148.578195086 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.260714 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk69m\" (UniqueName: \"kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m\") pod \"collect-profiles-29485845-sm4fm\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.289690 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sr5wf\" (UniqueName: \"kubernetes.io/projected/9f9d33f3-df0d-4588-957d-6b7e3646eef4-kube-api-access-sr5wf\") pod \"ingress-canary-rfs77\" (UID: \"9f9d33f3-df0d-4588-957d-6b7e3646eef4\") " pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.291112 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8lfr\" (UniqueName: \"kubernetes.io/projected/2b3113af-a4c8-498d-b357-21038e8ff69e-kube-api-access-f8lfr\") pod \"dns-default-w2xbt\" (UID: \"2b3113af-a4c8-498d-b357-21038e8ff69e\") " pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.292435 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hcm89\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.303946 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-57kr6" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.304121 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8n9kp\" (UniqueName: \"kubernetes.io/projected/b1dfbc89-0a35-44dc-968d-ff63a407d71e-kube-api-access-8n9kp\") pod \"service-ca-operator-777779d784-wdstp\" (UID: \"b1dfbc89-0a35-44dc-968d-ff63a407d71e\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.443960 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.453820 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:37.953782706 +0000 UTC m=+148.774131681 (durationBeforeRetry 500ms). 
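Both halves of the retry cycle above, the MountDevice for the incoming image-registry pod and the TearDown for the departed pod, fail on the same lookup: the kubelet cannot find kubevirt.io.hostpath-provisioner among its registered CSI plugins, so it cannot construct a CSI client for either direction. A minimal Go sketch of that name-keyed lookup (illustrative only, not the kubelet's actual implementation; the socket path is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// driverRegistry mimics the name-keyed lookup the kubelet performs before
// building a CSI client: names appear when a driver's node plugin registers,
// and every mount/unmount attempt looks the name up first.
type driverRegistry struct {
	mu      sync.RWMutex
	drivers map[string]string // driver name -> registration endpoint
}

var errNotRegistered = errors.New("not found in the list of registered CSI drivers")

func (r *driverRegistry) endpoint(name string) (string, error) {
	r.mu.RLock()
	defer r.mu.RUnlock()
	ep, ok := r.drivers[name]
	if !ok {
		return "", fmt.Errorf("driver name %s %w", name, errNotRegistered)
	}
	return ep, nil
}

func main() {
	reg := &driverRegistry{drivers: map[string]string{}}

	// Before the hostpath provisioner's node plugin registers, every
	// attempt fails the same way the log shows.
	if _, err := reg.endpoint("kubevirt.io.hostpath-provisioner"); err != nil {
		fmt.Println("MountDevice failed:", err)
	}

	// Once the plugin registers (hypothetical socket path), the same
	// lookup succeeds and the pending operations stop cycling.
	reg.mu.Lock()
	reg.drivers["kubevirt.io.hostpath-provisioner"] = "/var/lib/kubelet/plugins/csi-hostpath/csi.sock"
	reg.mu.Unlock()

	if ep, err := reg.endpoint("kubevirt.io.hostpath-provisioner"); err == nil {
		fmt.Println("driver reachable at", ep)
	}
}
```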
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.530117 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tpwxs\" (UniqueName: \"kubernetes.io/projected/e00eab55-a637-4e32-800b-b4703b747bc2-kube-api-access-tpwxs\") pod \"catalog-operator-68c6474976-tsjs5\" (UID: \"e00eab55-a637-4e32-800b-b4703b747bc2\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.544843 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s75l6\" (UniqueName: \"kubernetes.io/projected/49c4be94-985a-4c36-bb76-9dc6cdb0da17-kube-api-access-s75l6\") pod \"control-plane-machine-set-operator-78cbb6b69f-b59qh\" (UID: \"49c4be94-985a-4c36-bb76-9dc6cdb0da17\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.545117 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.545590 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z7r4z\" (UniqueName: \"kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z\") pod \"console-f9d7485db-k7ghh\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.545201 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.045184901 +0000 UTC m=+148.865533876 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.545841 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.546499 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.046481484 +0000 UTC m=+148.866830459 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.550958 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kddkv\" (UniqueName: \"kubernetes.io/projected/5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe-kube-api-access-kddkv\") pod \"migrator-59844c95c7-jlcpp\" (UID: \"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.551328 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.553313 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/ca1d7d5e-03bb-4a23-85ae-0711da60bc42-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-f4prk\" (UID: \"ca1d7d5e-03bb-4a23-85ae-0711da60bc42\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.557046 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.565314 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.572981 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-canary/ingress-canary-rfs77" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.764490 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.765009 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.264986666 +0000 UTC m=+149.085335641 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.802905 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.812822 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.848304 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.848768 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.849662 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.866582 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.867075 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.367064463 +0000 UTC m=+149.187413438 (durationBeforeRetry 500ms). 
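Each failure above parks the operation in nestedpendingoperations until a retry window elapses; the log shows a fixed 500ms durationBeforeRetry, after which the reconciler may start the operation again. A rough sketch of that gate, under the assumption that the deadline is simply lastErrorTime plus durationBeforeRetry:

```go
package main

import (
	"fmt"
	"time"
)

// pendingOp mirrors the bookkeeping behind the "No retries permitted
// until ..." lines: after a failure the operation is parked, and any
// attempt before the deadline is rejected outright.
type pendingOp struct {
	lastErrorTime       time.Time
	durationBeforeRetry time.Duration
}

func (op *pendingOp) tryStart(now time.Time) error {
	retryAt := op.lastErrorTime.Add(op.durationBeforeRetry)
	if now.Before(retryAt) {
		return fmt.Errorf("no retries permitted until %s (durationBeforeRetry %s)",
			retryAt.Format("2006-01-02 15:04:05.000000000 -0700 MST"),
			op.durationBeforeRetry)
	}
	return nil
}

func main() {
	failedAt := time.Now()
	op := &pendingOp{lastErrorTime: failedAt, durationBeforeRetry: 500 * time.Millisecond}

	fmt.Println(op.tryStart(failedAt.Add(100 * time.Millisecond))) // still parked
	fmt.Println(op.tryStart(failedAt.Add(600 * time.Millisecond))) // window elapsed: <nil>
}
```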
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.967205 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:37 crc kubenswrapper[5102]: E0123 06:56:37.967912 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.46789754 +0000 UTC m=+149.288246505 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.968487 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" event={"ID":"f4521e45-5ad8-4088-ac0c-86ec8d1149a2","Type":"ContainerStarted","Data":"b912c42ac80ec429d8ae248e08b776b6e9cae0a77f24fab1e40a8151b590e544"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.969873 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" event={"ID":"7d15ae6c-628c-446e-b6d3-2a1f58983409","Type":"ContainerStarted","Data":"f66b26412109afb9e3b0cc39fd232f9eef5d3c494d02c683f43e6307d9dbc75a"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.970625 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" event={"ID":"431cc446-b71e-4e5f-8689-b93573a96e44","Type":"ContainerStarted","Data":"b0b9746958a0a263010cb0f43a4971dffa14c66493ec7d43fdc1ac889a666050"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.972488 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" event={"ID":"6cfe0251-9d72-45fb-9df0-1f58d2ed6002","Type":"ContainerStarted","Data":"3cfbd5297504368fd52d382ae3843ff8b74a6ea95aa0a132dc77e1c53f291925"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.974738 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-d2tlh" event={"ID":"825445e2-af9b-498b-afc5-3af404eeacf2","Type":"ContainerStarted","Data":"10e17f2e10824302150e0a73331ac82dbca3ceed3a0eecf4be729db5a809c877"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.983435 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-server-57kr6" event={"ID":"5b9f87f2-6e86-4a80-9725-8d94129a946a","Type":"ContainerStarted","Data":"2dbfb002d6d534bf0b84f8305430b455e5b39097ee882349f027affe2c99cbcb"} Jan 23 06:56:37 crc kubenswrapper[5102]: I0123 06:56:37.985752 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" event={"ID":"38a43c31-7b58-4d32-8d88-66c5910a8207","Type":"ContainerStarted","Data":"d644094c2b6960a83a7d594aa79eefd0dfe1041cbe1607b5d8e92fbed60ee49d"} Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.077129 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.077723 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.577711015 +0000 UTC m=+149.398059990 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.178284 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.178399 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.678383087 +0000 UTC m=+149.498732062 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.178689 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.179118 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.679086601 +0000 UTC m=+149.499435566 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.279490 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.279695 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.779666329 +0000 UTC m=+149.600015304 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.279800 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.280177 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.780160586 +0000 UTC m=+149.600509561 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.380883 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.381043 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.881014194 +0000 UTC m=+149.701363169 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.381382 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.381774 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.881764788 +0000 UTC m=+149.702113763 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.482766 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.483075 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:38.983020439 +0000 UTC m=+149.803369454 (durationBeforeRetry 500ms). 
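The strict alternation of UnmountVolume and MountVolume for pvc-657094db-... reflects one volume sitting on both sides of the reconciler's comparison: the actual state still holds it for the departed pod 8f668bae-..., while the desired state wants it for image-registry-697d97f7c8-hgjw2 (UID 25efae93-...), so each pass starts both operations and both fail while the driver is unregistered. A toy sketch of that desired-versus-actual pass (not the real reconciler):

```go
package main

import "fmt"

// state maps a volume to the pod UID holding (or wanting) it.
type state struct {
	mounts map[string]string
}

// reconcile starts a teardown for anything mounted that is no longer
// desired, and a setup for anything desired that is not yet mounted.
func reconcile(desired, actual state) {
	for vol, pod := range actual.mounts {
		if desired.mounts[vol] != pod {
			fmt.Printf("UnmountVolume started for volume %q pod %q\n", vol, pod)
		}
	}
	for vol, pod := range desired.mounts {
		if actual.mounts[vol] != pod {
			fmt.Printf("MountVolume started for volume %q pod %q\n", vol, pod)
		}
	}
}

func main() {
	actual := state{mounts: map[string]string{
		"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8": "8f668bae-612b-4b75-9490-919e737c6a3b",
	}}
	desired := state{mounts: map[string]string{
		"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8": "25efae93-08ee-4c4a-88db-3faa88559398",
	}}
	// One pass starts both operations, matching the alternating log lines.
	reconcile(desired, actual)
}
```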
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.584809 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.589490 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.089467202 +0000 UTC m=+149.909816177 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.692763 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.693170 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.193135523 +0000 UTC m=+150.013484628 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.796513 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.797350 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.297332142 +0000 UTC m=+150.117681117 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.898488 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:38 crc kubenswrapper[5102]: E0123 06:56:38.898840 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.398820441 +0000 UTC m=+150.219169416 (durationBeforeRetry 500ms). 
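The m=+149.x suffixes in the retry deadlines are Go's monotonic clock reading, roughly seconds since the kubelet process started; Go prints the suffix whenever a time.Time still carries a monotonic component. A short demonstration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(500 * time.Millisecond)
	retryAt := time.Now()

	fmt.Println(retryAt)            // wall clock plus an "m=+x.xxxxxxxxx" suffix
	fmt.Println(retryAt.Sub(start)) // ~500ms; Sub uses the monotonic readings
	fmt.Println(retryAt.Round(0))   // Round(0) strips the monotonic reading
}
```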
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:38 crc kubenswrapper[5102]: I0123 06:56:38.993021 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-d2tlh" event={"ID":"825445e2-af9b-498b-afc5-3af404eeacf2","Type":"ContainerStarted","Data":"56e5dd54c75fd25c583a9008d94d28fdc4cfbb780179613f7ec99e15cd370150"} Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.000275 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.000888 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.500863097 +0000 UTC m=+150.321212072 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.004852 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" event={"ID":"7d15ae6c-628c-446e-b6d3-2a1f58983409","Type":"ContainerStarted","Data":"1e1adebfb0ca082fdb8705881e9ff2f2ce778530ac1c5019564c973f03b9938a"} Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.006129 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" event={"ID":"431cc446-b71e-4e5f-8689-b93573a96e44","Type":"ContainerStarted","Data":"0c8d18ab7f92a8e8b1c14e96b5fe4fb5bdef73ea2d9ad881b863756630e5fee6"} Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.101937 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.103755 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-23 06:56:39.603716192 +0000 UTC m=+150.424065167 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.121430 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-d2tlh" podStartSLOduration=127.121407387 podStartE2EDuration="2m7.121407387s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:39.041098609 +0000 UTC m=+149.861447584" watchObservedRunningTime="2026-01-23 06:56:39.121407387 +0000 UTC m=+149.941756362" Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.121829 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-52vtx" podStartSLOduration=127.121823281 podStartE2EDuration="2m7.121823281s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:39.119128112 +0000 UTC m=+149.939477087" watchObservedRunningTime="2026-01-23 06:56:39.121823281 +0000 UTC m=+149.942172256" Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.204235 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.204723 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.704704185 +0000 UTC m=+150.525053160 (durationBeforeRetry 500ms). 
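The pod_startup_latency_tracker lines above are plain timestamp arithmetic: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is the same value expressed in seconds. Reproducing the router pod's figure:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Layout matching the log's timestamp format; fractional seconds optional.
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

	created, _ := time.Parse(layout, "2026-01-23 06:54:32 +0000 UTC")
	running, _ := time.Parse(layout, "2026-01-23 06:56:39.121407387 +0000 UTC")

	e2e := running.Sub(created)
	fmt.Println(e2e)           // 2m7.121407387s  (podStartE2EDuration)
	fmt.Println(e2e.Seconds()) // 127.121407387   (podStartSLOduration)
}
```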
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.306239 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.306494 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.806453341 +0000 UTC m=+150.626802316 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.306546 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.306917 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.806907126 +0000 UTC m=+150.627256101 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.408178 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.408857 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:39.908809289 +0000 UTC m=+150.729158264 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.484091 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.486662 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.491917 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-n5pkw"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.509733 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.510239 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.010214195 +0000 UTC m=+150.830563330 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.516608 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwvr2"] Jan 23 06:56:39 crc kubenswrapper[5102]: W0123 06:56:39.549378 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podaffdc918_cd11_4a65_8b67_0c4bc2bbadfc.slice/crio-d79afc7b483e629f12c0ea09a3d7add1c7d7f0a09ad305657637e7299f30d25b WatchSource:0}: Error finding container d79afc7b483e629f12c0ea09a3d7add1c7d7f0a09ad305657637e7299f30d25b: Status 404 returned error can't find the container with id d79afc7b483e629f12c0ea09a3d7add1c7d7f0a09ad305657637e7299f30d25b Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.619256 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.620047 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.120019679 +0000 UTC m=+150.940368654 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.720896 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.721225 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.221213528 +0000 UTC m=+151.041562493 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.738440 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-mhgdk"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.778666 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.836314 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-ddjwb"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.840100 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.840661 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.340645911 +0000 UTC m=+151.160994886 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.840966 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.873926 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 23 06:56:39 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld Jan 23 06:56:39 crc kubenswrapper[5102]: [+]process-running ok Jan 23 06:56:39 crc kubenswrapper[5102]: healthz check failed Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.874020 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.885536 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56"] Jan 23 06:56:39 crc kubenswrapper[5102]: I0123 06:56:39.948473 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:39 crc kubenswrapper[5102]: E0123 06:56:39.948785 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.44877203 +0000 UTC m=+151.269121005 (durationBeforeRetry 500ms). 
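The probe records above show one of the two common failure shapes: an HTTP 500 whose start-of-body is the healthz handler's per-check report ([-] failed, [+] ok); a few lines further down, a readiness probe is refused outright because the server socket is not yet listening. A simplified HTTP probe along the kubelet's lines, where any 2xx/3xx status passes and the URL below is only a stand-in for the router's health endpoint:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

// httpProbe performs the same basic check as an HTTP Get probe:
// request the endpoint, treat status < 400 as success, and surface the
// status code plus the start of the body on failure.
func httpProbe(url string) error {
	client := &http.Client{Timeout: 1 * time.Second}
	resp, err := client.Get(url)
	if err != nil {
		return err // e.g. "connect: connection refused" before the server is up
	}
	defer resp.Body.Close()

	if resp.StatusCode >= 400 {
		body, _ := io.ReadAll(io.LimitReader(resp.Body, 1024))
		return fmt.Errorf("HTTP probe failed with statuscode: %d, start-of-body: %s",
			resp.StatusCode, body)
	}
	return nil
}

func main() {
	// Hypothetical local endpoint standing in for the router's healthz.
	if err := httpProbe("http://127.0.0.1:1936/healthz"); err != nil {
		fmt.Println("Probe failed:", err)
	}
}
```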
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.049916 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.050086 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.550063102 +0000 UTC m=+151.370412077 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.050150 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.050477 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.550463945 +0000 UTC m=+151.370812920 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.061149 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" event={"ID":"f4521e45-5ad8-4088-ac0c-86ec8d1149a2","Type":"ContainerStarted","Data":"6145c8773dae6bb733fb4670e0acdf976056339b17b13e5c0dff2aaf9560722f"} Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.076952 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" event={"ID":"b86088e5-98cc-41c9-9c95-710ab08cbcf5","Type":"ContainerStarted","Data":"f36072e95c0d183e24cb892a09927ac6a9a8c93b119e872f60f0cbac3da9e322"} Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.077015 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" event={"ID":"b86088e5-98cc-41c9-9c95-710ab08cbcf5","Type":"ContainerStarted","Data":"4359faed2f4573e75b04f6655e15cd2321c99a00759814c32f69557e079a02ec"} Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.090140 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-jbjds" podStartSLOduration=128.090118997 podStartE2EDuration="2m8.090118997s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.083104446 +0000 UTC m=+150.903453421" watchObservedRunningTime="2026-01-23 06:56:40.090118997 +0000 UTC m=+150.910467972" Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.121094 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-fd24q" podStartSLOduration=128.121077202 podStartE2EDuration="2m8.121077202s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.118702113 +0000 UTC m=+150.939051088" watchObservedRunningTime="2026-01-23 06:56:40.121077202 +0000 UTC m=+150.941426177" Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.239711 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.240764 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.740730992 +0000 UTC m=+151.561079967 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.259097 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-shspc"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.264253 5102 generic.go:334] "Generic (PLEG): container finished" podID="38a43c31-7b58-4d32-8d88-66c5910a8207" containerID="3f0cdae2925cfa16f75f50408ee28c40a75ee4883cc9aeef5a3215cb1a14bfc7" exitCode=0
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.264505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" event={"ID":"38a43c31-7b58-4d32-8d88-66c5910a8207","Type":"ContainerDied","Data":"3f0cdae2925cfa16f75f50408ee28c40a75ee4883cc9aeef5a3215cb1a14bfc7"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.267581 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-5q97t"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.278662 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" event={"ID":"6cfe0251-9d72-45fb-9df0-1f58d2ed6002","Type":"ContainerStarted","Data":"f3e99154765a0fe1c09fbf683cddce87a2d15fa5b43d42896cda65a94200b0a0"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.279196 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.279431 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.293175 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" event={"ID":"90313bf5-831e-4837-9727-9fe8d2a823b4","Type":"ContainerStarted","Data":"84c85365c385ebe8bfcd580f70a570fb7a3f12229c1ac42d899d5eb7d7baec47"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.301935 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.303405 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" event={"ID":"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3","Type":"ContainerStarted","Data":"cb4b4d326a1d45ed1bbf4d7721ccc2f35c41fc3d192eb751b84fcc1b2478c19c"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.303453 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" event={"ID":"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3","Type":"ContainerStarted","Data":"743bd99929be615ef920bab053356d055d6716e2be7165f265e73e4db132f35e"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.303944 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.321845 5102 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-wqtx6 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.321894 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.329900 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" event={"ID":"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea","Type":"ContainerStarted","Data":"9ba91183364087e9b99812a88aed0f94ca6dcf574dba2a1a06830490e6327df3"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.341707 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.342168 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.842149958 +0000 UTC m=+151.662498933 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: W0123 06:56:40.359927 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod7cf0a44b_e9bc_42da_8883_eb6c9a58f37e.slice/crio-0a06056288a05bf430b84728f6f03c22e156a628852509c724f92c2561055990 WatchSource:0}: Error finding container 0a06056288a05bf430b84728f6f03c22e156a628852509c724f92c2561055990: Status 404 returned error can't find the container with id 0a06056288a05bf430b84728f6f03c22e156a628852509c724f92c2561055990
Jan 23 06:56:40 crc kubenswrapper[5102]: W0123 06:56:40.373452 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podfb53c656_189e_4c35_94c5_f7ae81316c3c.slice/crio-c65938da55a2bc483d4b28c75ba336d78d7d976c691fa590f3cb4fa55191196f WatchSource:0}: Error finding container c65938da55a2bc483d4b28c75ba336d78d7d976c691fa590f3cb4fa55191196f: Status 404 returned error can't find the container with id c65938da55a2bc483d4b28c75ba336d78d7d976c691fa590f3cb4fa55191196f
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.374041 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n5pkw" event={"ID":"6ff670a5-d3e6-4fd5-97ad-2b07276283e8","Type":"ContainerStarted","Data":"cc87359432729c20eccbde81851a6954c5df1483ecda920f952c287fa26c38ec"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.374106 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-n5pkw" event={"ID":"6ff670a5-d3e6-4fd5-97ad-2b07276283e8","Type":"ContainerStarted","Data":"f22d10a3312d548929062a7a52ba9fb4ba8cd72c00486dbda1dc144fc977b6d5"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.375465 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-n5pkw"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.375732 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" podStartSLOduration=128.375709319 podStartE2EDuration="2m8.375709319s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.37481962 +0000 UTC m=+151.195168595" watchObservedRunningTime="2026-01-23 06:56:40.375709319 +0000 UTC m=+151.196058294"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.376482 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" podStartSLOduration=128.376475595 podStartE2EDuration="2m8.376475595s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.333220023 +0000 UTC m=+151.153568998" watchObservedRunningTime="2026-01-23 06:56:40.376475595 +0000 UTC m=+151.196824570"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.391577 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-vscws"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.399558 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.401699 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.401785 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.403445 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-57kr6" event={"ID":"5b9f87f2-6e86-4a80-9725-8d94129a946a","Type":"ContainerStarted","Data":"fb4f86214a6ba0234d870517875e4ddad208e8e9eafd2d61f16f8a00980f26e7"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.409804 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.412553 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.420794 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.427452 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-n5pkw" podStartSLOduration=128.427420161 podStartE2EDuration="2m8.427420161s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.410747899 +0000 UTC m=+151.231096874" watchObservedRunningTime="2026-01-23 06:56:40.427420161 +0000 UTC m=+151.247769136"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.428212 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-rfs77"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.428266 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ddjwb" event={"ID":"bed003ce-1b2e-4e38-982d-aed7be7819c8","Type":"ContainerStarted","Data":"35d43cdddc5fe632637b10722a66470a5e056a4f009beb985ff6dab74c38b438"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.437719 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" event={"ID":"affdc918-cd11-4a65-8b67-0c4bc2bbadfc","Type":"ContainerStarted","Data":"d79afc7b483e629f12c0ea09a3d7add1c7d7f0a09ad305657637e7299f30d25b"}
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.437771 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.449447 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.450243 5102 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-cwvr2 container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body=
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.450328 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused"
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.450926 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.950899937 +0000 UTC m=+151.771248912 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.451090 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.453706 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-jcg67"]
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.455443 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:40.955418468 +0000 UTC m=+151.775767633 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.464390 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.464451 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-57kr6" podStartSLOduration=6.464424996 podStartE2EDuration="6.464424996s" podCreationTimestamp="2026-01-23 06:56:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.457072892 +0000 UTC m=+151.277421867" watchObservedRunningTime="2026-01-23 06:56:40.464424996 +0000 UTC m=+151.284773971"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.557994 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.575354 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"]
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.577192 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.077128075 +0000 UTC m=+151.897477050 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.590067 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.594298 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.094272683 +0000 UTC m=+151.914621658 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: W0123 06:56:40.601033 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5ac16b8d_73f7_4bcb_907e_6786d0aa2ebe.slice/crio-935eeabba887bb4601a43b87005dbdc3cd0b9650425612b63b9ade95cb254bc8 WatchSource:0}: Error finding container 935eeabba887bb4601a43b87005dbdc3cd0b9650425612b63b9ade95cb254bc8: Status 404 returned error can't find the container with id 935eeabba887bb4601a43b87005dbdc3cd0b9650425612b63b9ade95cb254bc8
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.609758 5102 csr.go:261] certificate signing request csr-5lw6z is approved, waiting to be issued
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.623322 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.628881 5102 csr.go:257] certificate signing request csr-5lw6z is issued
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.694512 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.695398 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.695452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"]
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.698790 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.198759191 +0000 UTC m=+152.019108156 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.729999 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-w2xbt"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.738974 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-wdstp"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.779958 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.780029 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-ldkln"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.780042 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.798569 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.798950 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.298935046 +0000 UTC m=+152.119284021 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.798986 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.800130 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.820620 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.820697 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.824687 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-nr8q6" podStartSLOduration=128.824667888 podStartE2EDuration="2m8.824667888s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.542749718 +0000 UTC m=+151.363098713" watchObservedRunningTime="2026-01-23 06:56:40.824667888 +0000 UTC m=+151.645016863"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.826403 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwhrv"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.827287 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 06:56:40 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]process-running ok
Jan 23 06:56:40 crc kubenswrapper[5102]: healthz check failed
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.827328 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.827381 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"]
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.828308 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" podStartSLOduration=128.828294748 podStartE2EDuration="2m8.828294748s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:40.609049652 +0000 UTC m=+151.429398627" watchObservedRunningTime="2026-01-23 06:56:40.828294748 +0000 UTC m=+151.648643723"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.892087 5102 patch_prober.go:28] interesting pod/apiserver-76f77b778f-zrhhc container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]log ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]etcd ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/start-apiserver-admission-initializer ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/generic-apiserver-start-informers ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/max-in-flight-filter ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/storage-object-count-tracker-hook ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/image.openshift.io-apiserver-caches ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld
Jan 23 06:56:40 crc kubenswrapper[5102]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/project.openshift.io-projectcache ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/openshift.io-startinformers ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/openshift.io-restmapperupdater ok
Jan 23 06:56:40 crc kubenswrapper[5102]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok
Jan 23 06:56:40 crc kubenswrapper[5102]: livez check failed
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.892232 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" podUID="6cfe0251-9d72-45fb-9df0-1f58d2ed6002" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:40 crc kubenswrapper[5102]: I0123 06:56:40.900811 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:40 crc kubenswrapper[5102]: E0123 06:56:40.901093 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.401075147 +0000 UTC m=+152.221424122 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.004992 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.005982 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.505956618 +0000 UTC m=+152.326305593 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.106920 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.107696 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.607678194 +0000 UTC m=+152.428027169 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.209084 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.209587 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.709557316 +0000 UTC m=+152.529906331 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.341717 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.342119 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.842094802 +0000 UTC m=+152.662443777 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.445198 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.445627 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:41.945612488 +0000 UTC m=+152.765961463 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.541947 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" event={"ID":"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e","Type":"ContainerStarted","Data":"0a06056288a05bf430b84728f6f03c22e156a628852509c724f92c2561055990"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.550383 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.550844 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.05082049 +0000 UTC m=+152.871169465 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.563915 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" event={"ID":"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7","Type":"ContainerStarted","Data":"4d6d4df3d7a540b2084466462c8af62bd9caebc29ccc946a059fbd74f7720179"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.569896 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" event={"ID":"90313bf5-831e-4837-9727-9fe8d2a823b4","Type":"ContainerStarted","Data":"4ee6ae98b5c5fe49853d012091aacfedd8d1e3419efab227f075cfeb77ebec69"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.570417 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.571727 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" event={"ID":"c0c425b8-c2d0-4f99-b237-a8f18db7e8ea","Type":"ContainerStarted","Data":"7aa5ced8c66cdfe34f9d826b55c549a601c22363d14f3d35332f2f95bc35208d"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.573657 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" event={"ID":"ee5f921e-710b-4b7d-83b0-5e17137593a0","Type":"ContainerStarted","Data":"c1b69b6a7d187fa86e6a8038edbd53aca6a706af9cf6e4dd2e1efc7ec090bc91"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.575616 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vscws" event={"ID":"ac8be70a-9f33-4b73-a175-9c5c0dd3f262","Type":"ContainerStarted","Data":"d341ef4dba43b36f6fc1723cdb8d0fe6292f198c5ce64aeaabceafdbd0991604"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.577724 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" event={"ID":"affdc918-cd11-4a65-8b67-0c4bc2bbadfc","Type":"ContainerStarted","Data":"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.583930 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.585959 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" event={"ID":"e00eab55-a637-4e32-800b-b4703b747bc2","Type":"ContainerStarted","Data":"8eeec057ffd11184202a6c4b864e55c4ec06d0fe47cdfec246a180a44bdecbac"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.587575 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-ddjwb" event={"ID":"bed003ce-1b2e-4e38-982d-aed7be7819c8","Type":"ContainerStarted","Data":"c21006a854d423e123519882e8c57aab7d8a65ea2588ae5ff711a12749039b7e"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.589001 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.591167 5102 patch_prober.go:28] interesting pod/console-operator-58897d9998-ddjwb container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused" start-of-body=
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.591219 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-ddjwb" podUID="bed003ce-1b2e-4e38-982d-aed7be7819c8" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.11:8443/readyz\": dial tcp 10.217.0.11:8443: connect: connection refused"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.602059 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-v9rnf" podStartSLOduration=129.602046056 podStartE2EDuration="2m9.602046056s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:41.59917115 +0000 UTC m=+152.419520125" watchObservedRunningTime="2026-01-23 06:56:41.602046056 +0000 UTC m=+152.422395031"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.625466 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" event={"ID":"ac5092a2-a268-42c6-98be-b902ae96f92f","Type":"ContainerStarted","Data":"ca3fd4b1df6460002f67cd73b9f6e604e2bb8fbff75088a0d05ff96d78517637"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.625531 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" event={"ID":"fb53c656-189e-4c35-94c5-f7ae81316c3c","Type":"ContainerStarted","Data":"c65938da55a2bc483d4b28c75ba336d78d7d976c691fa590f3cb4fa55191196f"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.631454 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2027-01-23 06:51:40 +0000 UTC, rotation deadline is 2026-12-13 19:09:54.787623688 +0000 UTC
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.631486 5102 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 7788h13m13.156140348s for next certificate rotation
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.636664 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-mhgdk" podStartSLOduration=129.636644961 podStartE2EDuration="2m9.636644961s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:41.624264781 +0000 UTC m=+152.444613756" watchObservedRunningTime="2026-01-23 06:56:41.636644961 +0000 UTC m=+152.456993956"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.653022 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.655009 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.154988498 +0000 UTC m=+152.975337473 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.660295 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.713459 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-shspc" event={"ID":"40620ab8-3f8a-415b-aa67-ca9f813e51a7","Type":"ContainerStarted","Data":"96d2f8d8efd8bcef8a2025d8920d6f0c6175a2ecbef5bb5ff84cb95d7e5c381c"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.716414 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-ddjwb" podStartSLOduration=129.71640291 podStartE2EDuration="2m9.71640291s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:41.714087034 +0000 UTC m=+152.534436009" watchObservedRunningTime="2026-01-23 06:56:41.71640291 +0000 UTC m=+152.536751885"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.754095 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.755202 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.255186903 +0000 UTC m=+153.075535878 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.821448 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 06:56:41 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld
Jan 23 06:56:41 crc kubenswrapper[5102]: [+]process-running ok
Jan 23 06:56:41 crc kubenswrapper[5102]: healthz check failed
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.821551 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.831986 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" event={"ID":"4f3ef19c-f82b-444d-9133-364448e010c2","Type":"ContainerStarted","Data":"8c450eccd779fed425dbde742355195458fc3c174f9e97ffe1fb0957354aeb94"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.833804 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" event={"ID":"ca1d7d5e-03bb-4a23-85ae-0711da60bc42","Type":"ContainerStarted","Data":"58ef561655b293065652317b264cd9159b45d165d3051d4009c0dc323c764260"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.855824 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.856304 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.356276699 +0000 UTC m=+153.176625864 (durationBeforeRetry 500ms).
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.859822 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" event={"ID":"7e288133-e5c7-46c6-b29a-530d8a1c7aca","Type":"ContainerStarted","Data":"3a82978a969128ba2fcad518b0d175d27a59c372b27011178704b6a7af8d7459"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.860817 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.862487 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" event={"ID":"b5d34f41-83c0-4ad5-a95a-977fbb5dd623","Type":"ContainerStarted","Data":"bc16c678d15f49434919d23305446a6b6bb2dd67cf441effda7f254c31be8d2f"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.862563 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" event={"ID":"b5d34f41-83c0-4ad5-a95a-977fbb5dd623","Type":"ContainerStarted","Data":"b79ed634f0ec4539f5d76feaf34a218c16dc0851e19b71f2764a39f1b89ada94"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.865098 5102 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lttd5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.865158 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.866022 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w2xbt" event={"ID":"2b3113af-a4c8-498d-b357-21038e8ff69e","Type":"ContainerStarted","Data":"6de48bc42f7c2395386b2c3c7606058b098aee8ce897270d92c62bfa4d8a0bcb"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.868068 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" event={"ID":"d1d9d0ca-c225-4b42-8c2a-0264deb35d71","Type":"ContainerStarted","Data":"e37dc49dcd610cb1600ffabe97be44b9317ac4b191787f60a4bb5d578b9532c0"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.868092 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" event={"ID":"d1d9d0ca-c225-4b42-8c2a-0264deb35d71","Type":"ContainerStarted","Data":"40e9543e3867e1cc369a7559494759b1a5aa98748d168e5d6a95bb9773a524ae"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.870137 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" event={"ID":"3a601a63-5329-456d-87f4-c9dc191e8589","Type":"ContainerStarted","Data":"bab36585a75c40295d1a3ed1a77fe96f7afa52814377c08d965a7abcbefc99c1"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.872880 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" event={"ID":"49c4be94-985a-4c36-bb76-9dc6cdb0da17","Type":"ContainerStarted","Data":"1e906b9b91aecacab3a1ee6ca0a5bc1f17c1743124a3dbe03b05c8fb4cbcdb1b"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.874071 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" event={"ID":"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245","Type":"ContainerStarted","Data":"f72c5838c1863cf2b4f8d38d98065f64075e03654ede6e882bd6255915e78549"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.875592 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rfs77" event={"ID":"9f9d33f3-df0d-4588-957d-6b7e3646eef4","Type":"ContainerStarted","Data":"b8bf6c2d9c68f847862ad14d8bf1172ee49b9b379ecbaa6b901e2727a9856a57"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.876903 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" event={"ID":"3b3cdc03-9859-47b4-950a-3b01b64a11fb","Type":"ContainerStarted","Data":"afa8aff69c6042a57e476023037db9c1716d5e72618d58eb2cc4204029195210"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.878602 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" event={"ID":"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe","Type":"ContainerStarted","Data":"935eeabba887bb4601a43b87005dbdc3cd0b9650425612b63b9ade95cb254bc8"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.880276 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" event={"ID":"cda46479-cb25-47ec-8de7-31c9d6e22960","Type":"ContainerStarted","Data":"c1aa8a5566dd8cbd318ebe5837aa3db1bb7c802c692d0c896ee2ca59ea863ad1"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.888158 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" event={"ID":"38a43c31-7b58-4d32-8d88-66c5910a8207","Type":"ContainerStarted","Data":"6b3cf4a93d6b586c06f54ddd076151463632dba8d09c67126f73bf0184cb4b80"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.889304 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podStartSLOduration=129.889290903 podStartE2EDuration="2m9.889290903s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:41.883920265 +0000 UTC m=+152.704269250" watchObservedRunningTime="2026-01-23 06:56:41.889290903 +0000 UTC m=+152.709639878"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.895799 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" event={"ID":"deca5a30-509e-4519-a7f5-64944e4b7dd8","Type":"ContainerStarted","Data":"bc249f8293774cb8361b4e18830b1f9f834d307a59320a59e91bb5d49e13e03f"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.921727 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" podStartSLOduration=129.921701955 podStartE2EDuration="2m9.921701955s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:41.921653883 +0000 UTC m=+152.742002848" watchObservedRunningTime="2026-01-23 06:56:41.921701955 +0000 UTC m=+152.742050930"
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.922992 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" event={"ID":"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a","Type":"ContainerStarted","Data":"08056b6eea0ae5e1e482d4bf4669d2e0d93c2576c0dc13d0ee3060dc303d1ecc"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.924038 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-k7ghh" event={"ID":"4578060b-5283-42ea-aa38-c925d4265270","Type":"ContainerStarted","Data":"5b0c0927077f3de1316f4117793f10c51397e71c1b20ce4f4375ab5e4aa49c59"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.945764 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" event={"ID":"b1dfbc89-0a35-44dc-968d-ff63a407d71e","Type":"ContainerStarted","Data":"789125370c9f9d799800421e5c6193e2f265c3343946d6a5e2010480cd63d6b0"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.957051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" event={"ID":"2de5e0db-4684-4211-839d-c536e81a044f","Type":"ContainerStarted","Data":"7fa15d3f972d6088aa743fbbb9c9264df2df60ae2aa1907ef7548a0cf46085c0"}
Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.957236 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:41 crc kubenswrapper[5102]: E0123 06:56:41.959266 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.459241098 +0000 UTC m=+153.279590223 (durationBeforeRetry 500ms).
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.970817 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" event={"ID":"01ee0518-e113-4fbd-af00-cb2b6d43baef","Type":"ContainerStarted","Data":"659998129d34e70bcf620e25e13e8373ca82da334f10c258fc859da654d77243"} Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.975058 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 06:56:41 crc kubenswrapper[5102]: I0123 06:56:41.975142 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:41.995359 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.069046 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.072680 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.572660341 +0000 UTC m=+153.393009526 (durationBeforeRetry 500ms). 
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.170392 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.171950 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.671924767 +0000 UTC m=+153.492273742 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.272789 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.273214 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.773198688 +0000 UTC m=+153.593547663 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.374369 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.375206 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.875172943 +0000 UTC m=+153.695521918 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.482922 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.483634 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:42.983518649 +0000 UTC m=+153.803867624 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.584222 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.584639 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.084622715 +0000 UTC m=+153.904971690 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.686729 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.687183 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.187149289 +0000 UTC m=+154.007498264 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.787637 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.788007 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.287990506 +0000 UTC m=+154.108339481 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.859778 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 06:56:42 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld
Jan 23 06:56:42 crc kubenswrapper[5102]: [+]process-running ok
Jan 23 06:56:42 crc kubenswrapper[5102]: healthz check failed
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.860242 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:42 crc kubenswrapper[5102]: I0123 06:56:42.917631 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:42 crc kubenswrapper[5102]: E0123 06:56:42.917994 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.417981708 +0000 UTC m=+154.238330683 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.020077 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.020267 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.520239462 +0000 UTC m=+154.340588437 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.020341 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.020689 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.520671626 +0000 UTC m=+154.341020601 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.089579 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-vscws" event={"ID":"ac8be70a-9f33-4b73-a175-9c5c0dd3f262","Type":"ContainerStarted","Data":"8f775a2454c1e4b9b7a30cc9608326a093386762f93097edaeb06d6f9a4767ef"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.123717 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.125495 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.625478175 +0000 UTC m=+154.445827150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.134775 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" event={"ID":"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e","Type":"ContainerStarted","Data":"c1fdb38da7841835576e585a743cbcddf0e62ebfc8a567ed70ce88f2bfb2b96f"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.135367 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-vscws" podStartSLOduration=131.135356772 podStartE2EDuration="2m11.135356772s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.127697978 +0000 UTC m=+153.948046943" watchObservedRunningTime="2026-01-23 06:56:43.135356772 +0000 UTC m=+153.955705747"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.192692 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" event={"ID":"7e288133-e5c7-46c6-b29a-530d8a1c7aca","Type":"ContainerStarted","Data":"9b422137ba595f63447da25ea697c65b1dc59450da109f57b7300fd4f80daa7a"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.195170 5102 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lttd5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.195218 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.224848 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.225171 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.725158613 +0000 UTC m=+154.545507588 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.231341 5102 generic.go:334] "Generic (PLEG): container finished" podID="4f3ef19c-f82b-444d-9133-364448e010c2" containerID="00e97dd3e5f3a7e130bb203faf6ade6cd007b1d2e34ab8cfbd7fe4f90bf88ef8" exitCode=0
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.231529 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" event={"ID":"4f3ef19c-f82b-444d-9133-364448e010c2","Type":"ContainerDied","Data":"00e97dd3e5f3a7e130bb203faf6ade6cd007b1d2e34ab8cfbd7fe4f90bf88ef8"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.250026 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" event={"ID":"01ee0518-e113-4fbd-af00-cb2b6d43baef","Type":"ContainerStarted","Data":"7364d429eb2eb4c454b1a2a6838b37e315f5c4534497922b666da708b756f5bf"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.284900 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" event={"ID":"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a","Type":"ContainerStarted","Data":"0c74842838c684e94a0c57135e1366740b07d11ba28ed946fed1a5cc532361fa"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.323650 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" event={"ID":"32a6a23b-e2d2-48fc-81ea-ca2ae68f4245","Type":"ContainerStarted","Data":"feabc1659abdea2cd2f5e3319033caa173e3777b4681c3a8153ae2277b7c9dd1"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.326241 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.331646 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.831594076 +0000 UTC m=+154.651943051 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.343255 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" event={"ID":"ac5092a2-a268-42c6-98be-b902ae96f92f","Type":"ContainerStarted","Data":"a0bd276003e94b7d0d858491a9c0d57240f58f4dd4a30e9b41902cbb17e87fca"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.358069 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-shspc" event={"ID":"40620ab8-3f8a-415b-aa67-ca9f813e51a7","Type":"ContainerStarted","Data":"f4349adee7c0be6cc90eb21bfa82efaa7d870192ba6cf380f4bfbcb1a60dc9eb"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.385646 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-rfs77" event={"ID":"9f9d33f3-df0d-4588-957d-6b7e3646eef4","Type":"ContainerStarted","Data":"00896ed4655ec8fce4498457635151b4932932b69b6b7361aa45a73c6e6fb46e"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.413863 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" event={"ID":"b1dfbc89-0a35-44dc-968d-ff63a407d71e","Type":"ContainerStarted","Data":"1beb5817d59601cb90558a8c25c3a72f51c8a699dbbfdfd4ae53f10a11450574"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.418944 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" event={"ID":"fb53c656-189e-4c35-94c5-f7ae81316c3c","Type":"ContainerStarted","Data":"c7240baaf930740a82684cc646c1c6e9ae1b455687383d0c2eaae90861a5ac6e"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.420066 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" event={"ID":"0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7","Type":"ContainerStarted","Data":"fd0fdf66836b657068303cc8ced21f69010fbc67f23dec2076befb92828e0a88"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.422006 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.435417 5102 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2tts4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused" start-of-body=
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.435480 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" podUID="0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": dial tcp 10.217.0.19:5443: connect: connection refused"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.436638 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:43.936622212 +0000 UTC m=+154.756971187 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.432747 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.449233 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-g4kjg" podStartSLOduration=131.449215049 podStartE2EDuration="2m11.449215049s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.447903806 +0000 UTC m=+154.268252781" watchObservedRunningTime="2026-01-23 06:56:43.449215049 +0000 UTC m=+154.269564024"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.451210 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" event={"ID":"e00eab55-a637-4e32-800b-b4703b747bc2","Type":"ContainerStarted","Data":"7e623542657cecbdb5f9bdcf9eb029a4f8cb814526d0954d84a2f84c33e79ab7"}
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.451632 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.476120 5102 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-tsjs5 container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused" start-of-body=
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.476172 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" podUID="e00eab55-a637-4e32-800b-b4703b747bc2" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.31:8443/healthz\": dial tcp 10.217.0.31:8443: connect: connection refused"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.522012 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-wdstp" podStartSLOduration=131.521974657 podStartE2EDuration="2m11.521974657s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.517269061 +0000 UTC m=+154.337618036" watchObservedRunningTime="2026-01-23 06:56:43.521974657 +0000 UTC m=+154.342323632"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.522887 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body=
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.522951 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.545813 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.546037 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.046005473 +0000 UTC m=+154.866354448 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.547734 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.561562 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.061520956 +0000 UTC m=+154.881869931 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.565728 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" podStartSLOduration=131.565710565 podStartE2EDuration="2m11.565710565s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.563938296 +0000 UTC m=+154.384287271" watchObservedRunningTime="2026-01-23 06:56:43.565710565 +0000 UTC m=+154.386059540"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.585039 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" podStartSLOduration=131.584992143 podStartE2EDuration="2m11.584992143s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.582380257 +0000 UTC m=+154.402729232" watchObservedRunningTime="2026-01-23 06:56:43.584992143 +0000 UTC m=+154.405341118"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.640413 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-rfs77" podStartSLOduration=9.640374896 podStartE2EDuration="9.640374896s" podCreationTimestamp="2026-01-23 06:56:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.639107194 +0000 UTC m=+154.459456169" watchObservedRunningTime="2026-01-23 06:56:43.640374896 +0000 UTC m=+154.460723871"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.659874 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.660723 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.160686628 +0000 UTC m=+154.981035603 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.844529 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-ddjwb"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.851224 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" podStartSLOduration=131.851210014 podStartE2EDuration="2m11.851210014s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.793939268 +0000 UTC m=+154.614288243" watchObservedRunningTime="2026-01-23 06:56:43.851210014 +0000 UTC m=+154.671558989"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.853451 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.853931 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.353912113 +0000 UTC m=+155.174261088 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.884112 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 06:56:43 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld
Jan 23 06:56:43 crc kubenswrapper[5102]: [+]process-running ok
Jan 23 06:56:43 crc kubenswrapper[5102]: healthz check failed
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.884195 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.954957 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.955218 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.455177315 +0000 UTC m=+155.275526290 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.955515 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:43 crc kubenswrapper[5102]: E0123 06:56:43.956312 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.45626002 +0000 UTC m=+155.276608995 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.982204 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" podStartSLOduration=131.982183558 podStartE2EDuration="2m11.982183558s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.980084569 +0000 UTC m=+154.800433554" watchObservedRunningTime="2026-01-23 06:56:43.982183558 +0000 UTC m=+154.802532533"
Jan 23 06:56:43 crc kubenswrapper[5102]: I0123 06:56:43.983293 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" podStartSLOduration=131.983280245 podStartE2EDuration="2m11.983280245s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:43.875734035 +0000 UTC m=+154.696083010" watchObservedRunningTime="2026-01-23 06:56:43.983280245 +0000 UTC m=+154.803629220"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.056367 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.056979 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.556957622 +0000 UTC m=+155.377306597 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.163617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.209092 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.709069257 +0000 UTC m=+155.529418232 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.217484 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" podStartSLOduration=132.217463535 podStartE2EDuration="2m12.217463535s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:44.090224734 +0000 UTC m=+154.910573729" watchObservedRunningTime="2026-01-23 06:56:44.217463535 +0000 UTC m=+155.037812510"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.279840 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.280236 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.780215962 +0000 UTC m=+155.600564937 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.381705 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.382225 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.882205067 +0000 UTC m=+155.702554042 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.486456 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.486966 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:44.986944974 +0000 UTC m=+155.807293949 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.588822 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" event={"ID":"2de5e0db-4684-4211-839d-c536e81a044f","Type":"ContainerStarted","Data":"cd6959139d2bd9b52ee6abcc0f10ead66b677d9e767b8a75b6be5883ae19e576"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.590137 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.590645 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.090622835 +0000 UTC m=+155.910971810 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.637861 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" event={"ID":"cda46479-cb25-47ec-8de7-31c9d6e22960","Type":"ContainerStarted","Data":"4bd43375d5210559dbf648dc48059b34e1c725c4eb5226d8c2ca8573a840c338"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.639417 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.657958 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-7n6qf" podStartSLOduration=132.657935603 podStartE2EDuration="2m12.657935603s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:44.645902844 +0000 UTC m=+155.466251819" watchObservedRunningTime="2026-01-23 06:56:44.657935603 +0000 UTC m=+155.478284578"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.673982 5102 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bwhrv container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/healthz\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body=
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.674056 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.22:8080/healthz\": dial tcp 10.217.0.22:8080: connect: connection refused"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.692520 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.692736 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.192719233 +0000 UTC m=+156.013068208 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.692924 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2"
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.694032 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.194010657 +0000 UTC m=+156.014359632 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.717831 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-8qbr5" event={"ID":"ee5f921e-710b-4b7d-83b0-5e17137593a0","Type":"ContainerStarted","Data":"d12ca79e2d79957e9a7807a448b313524f00f96a40164e6e331141db2f971b2b"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.757647 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" event={"ID":"4f3ef19c-f82b-444d-9133-364448e010c2","Type":"ContainerStarted","Data":"1ea0436cab9e9951afba667c7ccc2c45c288c28a599bc27f1ac6d9da8d6b26c1"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.758090 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.781332 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" event={"ID":"3a601a63-5329-456d-87f4-c9dc191e8589","Type":"ContainerStarted","Data":"cfcf79dbfee56ea6d3d69e9cf0aab6d34108fafa5abc7207709046472114a8fb"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.788004 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-k7ghh" event={"ID":"4578060b-5283-42ea-aa38-c925d4265270","Type":"ContainerStarted","Data":"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.792403 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" event={"ID":"3b3cdc03-9859-47b4-950a-3b01b64a11fb","Type":"ContainerStarted","Data":"e2c6f788eb38d39d525f99a91fd7d7ae8ac2300a6f36f3b362edf80b62b98be5"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.793432 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") "
Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.795013 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.294991089 +0000 UTC m=+156.115340064 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.804368 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-shspc" event={"ID":"40620ab8-3f8a-415b-aa67-ca9f813e51a7","Type":"ContainerStarted","Data":"2117820dc8c895a8a85b78593a49ef26a001f0bc3d653bc86d35755ed5cd9ba8"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.814657 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" event={"ID":"deca5a30-509e-4519-a7f5-64944e4b7dd8","Type":"ContainerStarted","Data":"7fd1847c1fd5217d112d21b7ee5671196386d72903465468baf0e7f47128daba"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.814725 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" event={"ID":"deca5a30-509e-4519-a7f5-64944e4b7dd8","Type":"ContainerStarted","Data":"beb7b310654954b2495f9ef52403c2c566384ae532ccbd8bc34f0f67a63855d2"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.830190 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" event={"ID":"d1d9d0ca-c225-4b42-8c2a-0264deb35d71","Type":"ContainerStarted","Data":"877827b6174dbda91a9c3d49e247dbee09c7bdd133d71e0500db07d0334b0143"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.836071 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld
Jan 23 06:56:44 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld
Jan 23 06:56:44 crc kubenswrapper[5102]: [+]process-running ok
Jan 23 06:56:44 crc kubenswrapper[5102]: healthz check failed
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.836152 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500"
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.850466 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-f4prk" event={"ID":"ca1d7d5e-03bb-4a23-85ae-0711da60bc42","Type":"ContainerStarted","Data":"6177b6323e409b917d66505e7c5e1b2bb076c121d15dc1252a47637843b73e61"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.859096 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-b59qh" event={"ID":"49c4be94-985a-4c36-bb76-9dc6cdb0da17","Type":"ContainerStarted","Data":"29cf7615cdb8eaa12d50dc58dd8f066e1d853dbfc621473618450adbf174ebbe"}
Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.930196 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName:
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:44 crc kubenswrapper[5102]: E0123 06:56:44.930631 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.430614537 +0000 UTC m=+156.250963512 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.946398 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" podStartSLOduration=132.946378509 podStartE2EDuration="2m12.946378509s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:44.704062639 +0000 UTC m=+155.524411624" watchObservedRunningTime="2026-01-23 06:56:44.946378509 +0000 UTC m=+155.766727484" Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.947526 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" podStartSLOduration=132.947518916 podStartE2EDuration="2m12.947518916s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:44.945130837 +0000 UTC m=+155.765479822" watchObservedRunningTime="2026-01-23 06:56:44.947518916 +0000 UTC m=+155.767867891" Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.947995 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" event={"ID":"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe","Type":"ContainerStarted","Data":"3d90ec27c6aaad4f68df43a7414f5924e61ab3a1fb3834ffeb24aee231742d4d"} Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.948066 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" event={"ID":"5ac16b8d-73f7-4bcb-907e-6786d0aa2ebe","Type":"ContainerStarted","Data":"201ad8d9ad129dd0ac2578c5554b7dc29a5b29af1445af3a633c9dbd6e40694c"} Jan 23 06:56:44 crc kubenswrapper[5102]: I0123 06:56:44.999629 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" event={"ID":"01ee0518-e113-4fbd-af00-cb2b6d43baef","Type":"ContainerStarted","Data":"34ee348786dc8d642a858a6e1a2b856f6acbcb253154ff9004d3363b9fc3cfc2"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.026197 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" 
event={"ID":"cbba35c4-c6a7-44c7-9da3-acc7aad55b6a","Type":"ContainerStarted","Data":"838e2443d18f9da3f348c20e8f462e805bcfcd3f9400ed2b8132d49140e56654"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.028784 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" event={"ID":"7cf0a44b-e9bc-42da-8883-eb6c9a58f37e","Type":"ContainerStarted","Data":"7587797e4dc9abcfba56de45e3393922f16d7afe400413f375d2b6ae31681046"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.032304 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.033857 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.533832113 +0000 UTC m=+156.354181088 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.062984 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" event={"ID":"fb53c656-189e-4c35-94c5-f7ae81316c3c","Type":"ContainerStarted","Data":"dbf7b72ca20589fb68e0e53761aba8acbb3d5c00e7926296f031435449389a8d"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.063504 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.090123 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-485n8" podStartSLOduration=133.090082195 podStartE2EDuration="2m13.090082195s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.087018113 +0000 UTC m=+155.907367088" watchObservedRunningTime="2026-01-23 06:56:45.090082195 +0000 UTC m=+155.910431170" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.092044 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-b4t7z" podStartSLOduration=133.092034 podStartE2EDuration="2m13.092034s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.022310941 +0000 UTC m=+155.842659916" watchObservedRunningTime="2026-01-23 06:56:45.092034 +0000 UTC m=+155.912382975" Jan 23 06:56:45 
crc kubenswrapper[5102]: I0123 06:56:45.102732 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" event={"ID":"b5d34f41-83c0-4ad5-a95a-977fbb5dd623","Type":"ContainerStarted","Data":"c69594f7bacd8028bc8fcb28e250973c4c6924f6f6e1f4782e23873998d23de8"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.125634 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w2xbt" event={"ID":"2b3113af-a4c8-498d-b357-21038e8ff69e","Type":"ContainerStarted","Data":"f09854b887b0f904f3936229af49b616df78c6f2cf7b1e6efaefa58fe7c0971b"} Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.135352 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.137198 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.637180523 +0000 UTC m=+156.457529498 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.149249 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.153868 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-tsjs5" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.207801 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-k7ghh" podStartSLOduration=133.20777335 podStartE2EDuration="2m13.20777335s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.19023756 +0000 UTC m=+156.010586555" watchObservedRunningTime="2026-01-23 06:56:45.20777335 +0000 UTC m=+156.028122325" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.240451 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.241785 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.741750914 +0000 UTC m=+156.562100109 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.280380 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.342827 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.343313 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.843296725 +0000 UTC m=+156.663645700 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.347778 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-zrhhc" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.444068 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.444597 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.944554917 +0000 UTC m=+156.764903892 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.465041 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.466312 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:45.966292886 +0000 UTC m=+156.786641851 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.507157 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-shspc" podStartSLOduration=133.507129397 podStartE2EDuration="2m13.507129397s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.411149481 +0000 UTC m=+156.231498466" watchObservedRunningTime="2026-01-23 06:56:45.507129397 +0000 UTC m=+156.327478372" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.566138 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.566461 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.0664437 +0000 UTC m=+156.886792665 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.670509 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.670976 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.17095894 +0000 UTC m=+156.991307915 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.735284 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-ldkln" podStartSLOduration=133.735260868 podStartE2EDuration="2m13.735260868s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.512781194 +0000 UTC m=+156.333130189" watchObservedRunningTime="2026-01-23 06:56:45.735260868 +0000 UTC m=+156.555609843" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.771996 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.772410 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.272387327 +0000 UTC m=+157.092736302 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.777595 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" podStartSLOduration=133.777572648 podStartE2EDuration="2m13.777572648s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.738181734 +0000 UTC m=+156.558530709" watchObservedRunningTime="2026-01-23 06:56:45.777572648 +0000 UTC m=+156.597921623" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.834996 5102 patch_prober.go:28] interesting pod/router-default-5444994796-d2tlh container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Jan 23 06:56:45 crc kubenswrapper[5102]: [-]has-synced failed: reason withheld Jan 23 06:56:45 crc kubenswrapper[5102]: [+]process-running ok Jan 23 06:56:45 crc kubenswrapper[5102]: healthz check failed Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.835082 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-d2tlh" podUID="825445e2-af9b-498b-afc5-3af404eeacf2" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.874696 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.875518 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.375499079 +0000 UTC m=+157.195848054 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.889183 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-cxvms" podStartSLOduration=133.889160551 podStartE2EDuration="2m13.889160551s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.848132144 +0000 UTC m=+156.668481119" watchObservedRunningTime="2026-01-23 06:56:45.889160551 +0000 UTC m=+156.709509526" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.929860 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-7kv56" podStartSLOduration=133.929834527 podStartE2EDuration="2m13.929834527s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.887956661 +0000 UTC m=+156.708305636" watchObservedRunningTime="2026-01-23 06:56:45.929834527 +0000 UTC m=+156.750183502" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.931390 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-5q97t" podStartSLOduration=133.931377868 podStartE2EDuration="2m13.931377868s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.930979095 +0000 UTC m=+156.751328070" watchObservedRunningTime="2026-01-23 06:56:45.931377868 +0000 UTC m=+156.751726843" Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.976373 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.976631 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.476598425 +0000 UTC m=+157.296947410 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.976702 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:45 crc kubenswrapper[5102]: E0123 06:56:45.977195 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.477171113 +0000 UTC m=+157.297520088 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:45 crc kubenswrapper[5102]: I0123 06:56:45.986137 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-jlcpp" podStartSLOduration=133.98611839 podStartE2EDuration="2m13.98611839s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:45.985894652 +0000 UTC m=+156.806243627" watchObservedRunningTime="2026-01-23 06:56:45.98611839 +0000 UTC m=+156.806467365" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.077845 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.078004 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.57798504 +0000 UTC m=+157.398334015 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.078080 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.078372 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.578364623 +0000 UTC m=+157.398713598 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.080764 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-bj6m5" podStartSLOduration=134.080741561 podStartE2EDuration="2m14.080741561s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:46.078219828 +0000 UTC m=+156.898568803" watchObservedRunningTime="2026-01-23 06:56:46.080741561 +0000 UTC m=+156.901090536" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.128155 5102 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-2tts4 container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.19:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.128234 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" podUID="0e0359d2-c4c5-4165-aab9-9b21b5a3fbc7" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.19:5443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.141386 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac5092a2-a268-42c6-98be-b902ae96f92f" containerID="a0bd276003e94b7d0d858491a9c0d57240f58f4dd4a30e9b41902cbb17e87fca" exitCode=0 Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.141464 5102 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" event={"ID":"ac5092a2-a268-42c6-98be-b902ae96f92f","Type":"ContainerDied","Data":"a0bd276003e94b7d0d858491a9c0d57240f58f4dd4a30e9b41902cbb17e87fca"} Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.149469 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-w2xbt" event={"ID":"2b3113af-a4c8-498d-b357-21038e8ff69e","Type":"ContainerStarted","Data":"b1e6b78d0be48191a41c271fa9f3f3b7b56906cf61b8c7e59867997fa63a4a21"} Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.150198 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.159470 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" event={"ID":"3b3cdc03-9859-47b4-950a-3b01b64a11fb","Type":"ContainerStarted","Data":"47fae59527762691a23f87b959009f738f3e9372be86af728ca570f8ca3bb051"} Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.164342 5102 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-bwhrv container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.22:8080/healthz\": dial tcp 10.217.0.22:8080: connect: connection refused" start-of-body= Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.164418 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.22:8080/healthz\": dial tcp 10.217.0.22:8080: connect: connection refused" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.179098 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.179481 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.679465069 +0000 UTC m=+157.499814044 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.207516 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-w2xbt" podStartSLOduration=12.207490866 podStartE2EDuration="12.207490866s" podCreationTimestamp="2026-01-23 06:56:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:46.205202871 +0000 UTC m=+157.025551846" watchObservedRunningTime="2026-01-23 06:56:46.207490866 +0000 UTC m=+157.027839831" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.280858 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.285194 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.785174337 +0000 UTC m=+157.605523312 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.303027 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.303068 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.329202 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.382434 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.384319 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. 
No retries permitted until 2026-01-23 06:56:46.884299848 +0000 UTC m=+157.704648823 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.409063 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.409181 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.409362 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.409446 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.484875 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.485311 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:46.985296861 +0000 UTC m=+157.805645836 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.586616 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.587190 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.087172683 +0000 UTC m=+157.907521648 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.689011 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.689521 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.189505569 +0000 UTC m=+158.009854544 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.770102 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.770185 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.793437 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.793948 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.293923936 +0000 UTC m=+158.114272911 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.818795 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.832179 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.834418 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.868902 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-2tts4" Jan 23 06:56:46 crc kubenswrapper[5102]: I0123 06:56:46.896076 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:46 crc kubenswrapper[5102]: E0123 06:56:46.899513 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.399489059 +0000 UTC m=+158.219838034 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.055874 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.056581 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.556564318 +0000 UTC m=+158.376913293 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.163383 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.171760 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.671731819 +0000 UTC m=+158.492080784 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.222468 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" event={"ID":"3b3cdc03-9859-47b4-950a-3b01b64a11fb","Type":"ContainerStarted","Data":"30a95750ed090067d7a43271c631f39677e86ef6bdafa834c26bd2c798bf8c8d"} Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.233358 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-d2tlh" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.239636 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-4rpzc" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.265902 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.266956 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.76693296 +0000 UTC m=+158.587281935 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.334249 5102 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.369016 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.369465 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.869449133 +0000 UTC m=+158.689798108 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.399382 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-cxz8x"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.400626 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: W0123 06:56:47.420744 5102 reflector.go:561] object-"openshift-marketplace"/"community-operators-dockercfg-dmngl": failed to list *v1.Secret: secrets "community-operators-dockercfg-dmngl" is forbidden: User "system:node:crc" cannot list resource "secrets" in API group "" in the namespace "openshift-marketplace": no relationship found between node 'crc' and this object Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.420812 5102 reflector.go:158] "Unhandled Error" err="object-\"openshift-marketplace\"/\"community-operators-dockercfg-dmngl\": Failed to watch *v1.Secret: failed to list *v1.Secret: secrets \"community-operators-dockercfg-dmngl\" is forbidden: User \"system:node:crc\" cannot list resource \"secrets\" in API group \"\" in the namespace \"openshift-marketplace\": no relationship found between node 'crc' and this object" logger="UnhandledError" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.434509 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cxz8x"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.471352 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.471589 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.471630 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrf7z\" (UniqueName: \"kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.471657 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.471830 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:47.971808031 +0000 UTC m=+158.792157006 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.573165 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrf7z\" (UniqueName: \"kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.573679 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.573732 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.573782 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.574244 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.574413 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.574525 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:48.07451167 +0000 UTC m=+158.894860645 (durationBeforeRetry 500ms). 
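
[Note] Every "driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers" failure above and below is a lookup in the kubelet's in-memory table of node plugins, which stays empty for a driver until its registration socket has been processed. A minimal sketch of such a registry follows; the type and method names are made up for illustration and are not the kubelet's internal identifiers.

    package main

    import (
        "fmt"
        "sync"
    )

    // driverRegistry is a toy version of the kubelet-side table consulted by
    // the CSI volume plugin; lookups fail with "not found in the list of
    // registered CSI drivers" until the driver's socket is processed.
    type driverRegistry struct {
        mu        sync.RWMutex
        endpoints map[string]string // driver name -> unix socket endpoint
    }

    func newDriverRegistry() *driverRegistry {
        return &driverRegistry{endpoints: make(map[string]string)}
    }

    func (r *driverRegistry) register(name, endpoint string) {
        r.mu.Lock()
        defer r.mu.Unlock()
        r.endpoints[name] = endpoint
    }

    func (r *driverRegistry) clientFor(name string) (string, error) {
        r.mu.RLock()
        defer r.mu.RUnlock()
        ep, ok := r.endpoints[name]
        if !ok {
            return "", fmt.Errorf("driver name %s not found in the list of registered CSI drivers", name)
        }
        return ep, nil
    }

    func main() {
        reg := newDriverRegistry()

        // before registration: the failure mode repeated throughout this log
        if _, err := reg.clientFor("kubevirt.io.hostpath-provisioner"); err != nil {
            fmt.Println("mount attempt:", err)
        }

        // after the plugin watcher processes the registration socket, the same
        // lookup succeeds and the pending mount/unmount operations go through
        reg.register("kubevirt.io.hostpath-provisioner", "/var/lib/kubelet/plugins/csi-hostpath/csi.sock")
        ep, _ := reg.clientFor("kubevirt.io.hostpath-provisioner")
        fmt.Println("mount attempt: dialing", ep)
    }
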
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.615643 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrf7z\" (UniqueName: \"kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z\") pod \"community-operators-cxz8x\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") " pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.680750 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.681314 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:48.181288574 +0000 UTC m=+159.001637549 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.689358 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sl6q6"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.690679 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.722021 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.726010 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sl6q6"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.785475 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.785583 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.785612 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qlgt\" (UniqueName: \"kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.785639 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.786024 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2026-01-23 06:56:48.286008189 +0000 UTC m=+159.106357164 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-hgjw2" (UID: "25efae93-08ee-4c4a-88db-3faa88559398") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.804431 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.805689 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.818456 5102 patch_prober.go:28] interesting pod/console-f9d7485db-k7ghh container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.39:8443/health\": dial tcp 10.217.0.39:8443: connect: connection refused" start-of-body= Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.818572 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-k7ghh" podUID="4578060b-5283-42ea-aa38-c925d4265270" containerName="console" probeResult="failure" output="Get \"https://10.217.0.39:8443/health\": dial tcp 10.217.0.39:8443: connect: connection refused" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.832466 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.837450 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.865425 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.886635 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887110 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gr9qr\" (UniqueName: \"kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887153 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887169 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qlgt\" (UniqueName: \"kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887190 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887227 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.887304 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: E0123 06:56:47.888383 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2026-01-23 06:56:48.388358637 +0000 UTC m=+159.208707612 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.889579 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.890117 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.922925 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4qlgt\" (UniqueName: \"kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt\") pod \"certified-operators-sl6q6\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") " pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.968037 5102 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2026-01-23T06:56:47.334279259Z","Handler":null,"Name":""} Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.973128 5102 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.973762 5102 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.992355 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.992458 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.992710 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: 
\"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.992762 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gr9qr\" (UniqueName: \"kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:47 crc kubenswrapper[5102]: I0123 06:56:47.993821 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.004676 5102 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.004734 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.022497 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.023379 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.040142 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-86qsn"] Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.040352 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gr9qr\" (UniqueName: \"kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr\") pod \"community-operators-lwmjx\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.053356 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-86qsn"] Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.053504 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.094447 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggqvw\" (UniqueName: \"kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.094588 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.094619 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.146763 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.153217 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-hgjw2\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.197825 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.197885 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume\") pod \"ac5092a2-a268-42c6-98be-b902ae96f92f\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.197958 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk69m\" (UniqueName: \"kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m\") pod \"ac5092a2-a268-42c6-98be-b902ae96f92f\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.197996 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume\") pod \"ac5092a2-a268-42c6-98be-b902ae96f92f\" (UID: \"ac5092a2-a268-42c6-98be-b902ae96f92f\") " Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.198217 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.198261 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.198301 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggqvw\" (UniqueName: \"kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.199994 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.200101 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.200750 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume" (OuterVolumeSpecName: "config-volume") pod "ac5092a2-a268-42c6-98be-b902ae96f92f" (UID: "ac5092a2-a268-42c6-98be-b902ae96f92f"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.207874 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m" (OuterVolumeSpecName: "kube-api-access-mk69m") pod "ac5092a2-a268-42c6-98be-b902ae96f92f" (UID: "ac5092a2-a268-42c6-98be-b902ae96f92f"). InnerVolumeSpecName "kube-api-access-mk69m". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.208775 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ac5092a2-a268-42c6-98be-b902ae96f92f" (UID: "ac5092a2-a268-42c6-98be-b902ae96f92f"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.232996 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggqvw\" (UniqueName: \"kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw\") pod \"certified-operators-86qsn\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.273106 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" event={"ID":"3b3cdc03-9859-47b4-950a-3b01b64a11fb","Type":"ContainerStarted","Data":"694aeb1c16ab129c1ad3db2d5cb3230de0e3f6653963a4bb5c48fec33bd357e5"} Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.275244 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.278273 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.278659 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.279505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm" event={"ID":"ac5092a2-a268-42c6-98be-b902ae96f92f","Type":"ContainerDied","Data":"ca3fd4b1df6460002f67cd73b9f6e604e2bb8fbff75088a0d05ff96d78517637"} Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.279636 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ca3fd4b1df6460002f67cd73b9f6e604e2bb8fbff75088a0d05ff96d78517637" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.279936 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.285917 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.300628 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ac5092a2-a268-42c6-98be-b902ae96f92f-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.300919 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ac5092a2-a268-42c6-98be-b902ae96f92f-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.300932 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk69m\" (UniqueName: \"kubernetes.io/projected/ac5092a2-a268-42c6-98be-b902ae96f92f-kube-api-access-mk69m\") on node \"crc\" DevicePath \"\"" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.342340 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.348159 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="hostpath-provisioner/csi-hostpathplugin-jcg67" podStartSLOduration=14.348139414 podStartE2EDuration="14.348139414s" podCreationTimestamp="2026-01-23 06:56:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:48.346983545 +0000 UTC m=+159.167332510" watchObservedRunningTime="2026-01-23 06:56:48.348139414 +0000 UTC m=+159.168488389" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.404029 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.755214 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 06:56:48 crc kubenswrapper[5102]: I0123 06:56:48.904971 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.305260 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 23 06:56:49 crc kubenswrapper[5102]: E0123 06:56:49.305442 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac5092a2-a268-42c6-98be-b902ae96f92f" containerName="collect-profiles" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.305453 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac5092a2-a268-42c6-98be-b902ae96f92f" containerName="collect-profiles" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.305562 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac5092a2-a268-42c6-98be-b902ae96f92f" containerName="collect-profiles" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.305897 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.424742 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.424941 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.425894 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.425921 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.435177 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sl6q6"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.444962 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-cxz8x"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.573235 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.574161 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.574456 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.574666 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.658572 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.690967 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.740048 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.789923 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b5948"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.793788 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.810286 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.863904 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5948"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.943758 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.969298 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:56:49 crc kubenswrapper[5102]: W0123 06:56:49.977042 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod25efae93_08ee_4c4a_88db_3faa88559398.slice/crio-79cd91dc3c7ad61f39465edd4d205f17a820fd8ed9c526469c73cd41d6855945 WatchSource:0}: Error finding container 79cd91dc3c7ad61f39465edd4d205f17a820fd8ed9c526469c73cd41d6855945: Status 404 returned error can't find the container with id 79cd91dc3c7ad61f39465edd4d205f17a820fd8ed9c526469c73cd41d6855945 Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.992037 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.992083 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:49 crc kubenswrapper[5102]: I0123 06:56:49.992173 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jxphv\" (UniqueName: \"kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.031108 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-86qsn"] Jan 23 06:56:50 crc kubenswrapper[5102]: W0123 06:56:50.063777 5102 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63c37b81_081b_4a4c_b448_26e58f97493b.slice/crio-4a2a06eb7625e8bc1918859d0053c34cd02d80550d61f55b5db559ad46516d3d WatchSource:0}: Error finding container 4a2a06eb7625e8bc1918859d0053c34cd02d80550d61f55b5db559ad46516d3d: Status 404 returned error can't find the container with id 4a2a06eb7625e8bc1918859d0053c34cd02d80550d61f55b5db559ad46516d3d Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.093238 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jxphv\" (UniqueName: \"kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.093310 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.093333 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.094075 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.094714 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.146347 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jxphv\" (UniqueName: \"kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv\") pod \"redhat-marketplace-b5948\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.150799 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7h5pc"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.152323 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.174479 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7h5pc"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.200575 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h4gx5\" (UniqueName: \"kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.200689 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.200732 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.301801 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.310913 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.311605 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h4gx5\" (UniqueName: \"kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.302599 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.313262 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.316959 5102 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerStarted","Data":"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.318575 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerStarted","Data":"a57a335cbdc5941333c7dce761b3280e4cd5a67f5c2ae3471b380bfb129527b1"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.333960 5102 generic.go:334] "Generic (PLEG): container finished" podID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerID="f8424f91b6b63e7dcd121a227f98ec294243665d697937cef52d38d305f69d80" exitCode=0 Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.334052 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerDied","Data":"f8424f91b6b63e7dcd121a227f98ec294243665d697937cef52d38d305f69d80"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.334087 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerStarted","Data":"398c7f1cb2372b6868f22e6e0692d81d830c9c91f449fa0fe6dc038c72ff8458"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.343098 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" event={"ID":"25efae93-08ee-4c4a-88db-3faa88559398","Type":"ContainerStarted","Data":"b55105cf116daf83d3792a8e4a87e9cdbf81c6d461e2376dba87eb8a97a7f113"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.343453 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" event={"ID":"25efae93-08ee-4c4a-88db-3faa88559398","Type":"ContainerStarted","Data":"79cd91dc3c7ad61f39465edd4d205f17a820fd8ed9c526469c73cd41d6855945"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.343117 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.344378 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.346433 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h4gx5\" (UniqueName: \"kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5\") pod \"redhat-marketplace-7h5pc\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") " pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.348057 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerStarted","Data":"4a2a06eb7625e8bc1918859d0053c34cd02d80550d61f55b5db559ad46516d3d"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.350166 5102 generic.go:334] "Generic (PLEG): container finished" podID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerID="54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61" exitCode=0 Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.350198 5102 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerDied","Data":"54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.350215 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerStarted","Data":"68ea154e577a2e7993f2a594af997e1dfd8df90c5eddb469ad8863b8bd7c6055"} Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.370667 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.435375 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" podStartSLOduration=138.43535421 podStartE2EDuration="2m18.43535421s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:50.408946936 +0000 UTC m=+161.229295921" watchObservedRunningTime="2026-01-23 06:56:50.43535421 +0000 UTC m=+161.255703185" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.453677 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Jan 23 06:56:50 crc kubenswrapper[5102]: W0123 06:56:50.467520 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod6ed3af40_55c0_41a2_b5cb_21bb9c60ef67.slice/crio-6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40 WatchSource:0}: Error finding container 6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40: Status 404 returned error can't find the container with id 6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40 Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.513482 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.578521 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nwnmg"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.579589 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.581978 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.591913 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nwnmg"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.786877 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-8hw9t"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.790175 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.797218 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8hw9t"] Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.818142 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.818277 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.818480 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2rzfz\" (UniqueName: \"kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919459 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2rzfz\" (UniqueName: \"kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919531 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919566 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919615 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919649 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.919688 5102 
Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.921036 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg"
Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.921111 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg"
Jan 23 06:56:50 crc kubenswrapper[5102]: I0123 06:56:50.955903 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2rzfz\" (UniqueName: \"kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz\") pod \"redhat-operators-nwnmg\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") " pod="openshift-marketplace/redhat-operators-nwnmg"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.021398 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.021736 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-catalog-content\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.021801 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.022316 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.022913 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-catalog-content\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.058746 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t"
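The reconciler_common.go and operation_generator.go entries above are one pass of the kubelet volume manager's reconciler: every volume a pod spec demands is first verified as attached (VerifyControllerAttachedVolume), then mounted (MountVolume started, then MountVolume.SetUp succeeded). A compressed sketch of that desired-state-versus-actual-state loop, under assumed types:

```go
package main

import "fmt"

// volume is the desired state: what a pod spec says must be mounted.
type volume struct {
	name string // e.g. "utilities", "catalog-content", "kube-api-access-2rzfz"
	pod  string // pod UID that owns it
}

func main() {
	desired := []volume{
		{"utilities", "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"},
		{"catalog-content", "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"},
	}
	mounted := map[string]bool{} // actual state: volumes already set up

	// One reconcile pass: anything desired but not yet mounted is verified
	// as attached and then mounted, matching the log's three-step sequence.
	for _, v := range desired {
		key := v.pod + "/" + v.name
		if mounted[key] {
			continue
		}
		fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.name, v.pod)
		fmt.Printf("MountVolume started for volume %q\n", v.name)
		// The real operation executor runs SetUp asynchronously; here it is inline.
		mounted[key] = true
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v.name)
	}
}
```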
\"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") pod \"redhat-operators-8hw9t\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") " pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.089619 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5948"] Jan 23 06:56:51 crc kubenswrapper[5102]: W0123 06:56:51.097297 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0b589483_946f_4931_8bae_7b38e37682b3.slice/crio-c0c506e4454b3c1f305b8d5d60c8c78afd7552117af76ac7f3ce7d3cce334e2e WatchSource:0}: Error finding container c0c506e4454b3c1f305b8d5d60c8c78afd7552117af76ac7f3ce7d3cce334e2e: Status 404 returned error can't find the container with id c0c506e4454b3c1f305b8d5d60c8c78afd7552117af76ac7f3ce7d3cce334e2e Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.125934 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.164684 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.248255 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7h5pc"] Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.363223 5102 generic.go:334] "Generic (PLEG): container finished" podID="63c37b81-081b-4a4c-b448-26e58f97493b" containerID="27bab8a3525adbe865aeaf6d5200e21a90d56e7c7bd39b3f0407dd32cfff9c71" exitCode=0 Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.363523 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerDied","Data":"27bab8a3525adbe865aeaf6d5200e21a90d56e7c7bd39b3f0407dd32cfff9c71"} Jan 23 06:56:51 crc kubenswrapper[5102]: W0123 06:56:51.364162 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf7bf48e9_2a76_4bc6_ab13_0fe41c736aae.slice/crio-03e3d341abe59abaa9e2e8609578a6fd86e1aad3322af11a8e64c2b807eed26a WatchSource:0}: Error finding container 03e3d341abe59abaa9e2e8609578a6fd86e1aad3322af11a8e64c2b807eed26a: Status 404 returned error can't find the container with id 03e3d341abe59abaa9e2e8609578a6fd86e1aad3322af11a8e64c2b807eed26a Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.365284 5102 generic.go:334] "Generic (PLEG): container finished" podID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerID="c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c" exitCode=0 Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.365406 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerDied","Data":"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c"} Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.369022 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67","Type":"ContainerStarted","Data":"6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40"} Jan 23 
06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.389090 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerStarted","Data":"c0c506e4454b3c1f305b8d5d60c8c78afd7552117af76ac7f3ce7d3cce334e2e"} Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.535714 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-8hw9t"] Jan 23 06:56:51 crc kubenswrapper[5102]: W0123 06:56:51.558027 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1b442d58_fe80_472a_a33e_ec4e15eadd8c.slice/crio-d14e85fe10b701f0d52a195c9f592f7c5b660601491209d55bfd7443333e0f91 WatchSource:0}: Error finding container d14e85fe10b701f0d52a195c9f592f7c5b660601491209d55bfd7443333e0f91: Status 404 returned error can't find the container with id d14e85fe10b701f0d52a195c9f592f7c5b660601491209d55bfd7443333e0f91 Jan 23 06:56:51 crc kubenswrapper[5102]: I0123 06:56:51.759828 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nwnmg"] Jan 23 06:56:51 crc kubenswrapper[5102]: W0123 06:56:51.772205 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod45d96a0d_f31b_4afb_a528_58d7bbe8fe1b.slice/crio-947c50eacd7a7ab68ee01442471eb06cc525ac3be54ec2c01a6601be96898321 WatchSource:0}: Error finding container 947c50eacd7a7ab68ee01442471eb06cc525ac3be54ec2c01a6601be96898321: Status 404 returned error can't find the container with id 947c50eacd7a7ab68ee01442471eb06cc525ac3be54ec2c01a6601be96898321 Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.407311 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerStarted","Data":"9a6273dc9aa78886f03e9356b92e462a9a30a6150ecd7acfc56209c5988e2353"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.407369 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerStarted","Data":"d14e85fe10b701f0d52a195c9f592f7c5b660601491209d55bfd7443333e0f91"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.413111 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67","Type":"ContainerStarted","Data":"f12325844383bd3cb190427659f29d97f99b6937b42573e918a5e4ec22470d24"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.415227 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerStarted","Data":"227c901cda76bb24eebfaf4aaaef7c3fc1700d174d5cfc0f9c3a0d31f61bddfa"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.419907 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerStarted","Data":"ab4359e692538395e31eb783670b96649f5d4383f6c90a0424f32d146a1096b3"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.419958 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" 
event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerStarted","Data":"947c50eacd7a7ab68ee01442471eb06cc525ac3be54ec2c01a6601be96898321"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.435741 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerStarted","Data":"4739ed672c6b24d9cd7d1d7d88ca98a171e3b5197eb8ace6f558f23e94774a40"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.435791 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerStarted","Data":"03e3d341abe59abaa9e2e8609578a6fd86e1aad3322af11a8e64c2b807eed26a"} Jan 23 06:56:52 crc kubenswrapper[5102]: I0123 06:56:52.455681 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.455663123 podStartE2EDuration="3.455663123s" podCreationTimestamp="2026-01-23 06:56:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:52.451829176 +0000 UTC m=+163.272178151" watchObservedRunningTime="2026-01-23 06:56:52.455663123 +0000 UTC m=+163.276012098" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.086172 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.087575 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.093442 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.094056 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.102231 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.221746 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.221870 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.323289 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.323400 
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.323567 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.364433 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.420972 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc"
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.472735 5102 generic.go:334] "Generic (PLEG): container finished" podID="6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" containerID="f12325844383bd3cb190427659f29d97f99b6937b42573e918a5e4ec22470d24" exitCode=0
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.472850 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67","Type":"ContainerDied","Data":"f12325844383bd3cb190427659f29d97f99b6937b42573e918a5e4ec22470d24"}
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.502288 5102 generic.go:334] "Generic (PLEG): container finished" podID="0b589483-946f-4931-8bae-7b38e37682b3" containerID="227c901cda76bb24eebfaf4aaaef7c3fc1700d174d5cfc0f9c3a0d31f61bddfa" exitCode=0
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.502366 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerDied","Data":"227c901cda76bb24eebfaf4aaaef7c3fc1700d174d5cfc0f9c3a0d31f61bddfa"}
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.525982 5102 generic.go:334] "Generic (PLEG): container finished" podID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerID="ab4359e692538395e31eb783670b96649f5d4383f6c90a0424f32d146a1096b3" exitCode=0
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.526092 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerDied","Data":"ab4359e692538395e31eb783670b96649f5d4383f6c90a0424f32d146a1096b3"}
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.531256 5102 generic.go:334] "Generic (PLEG): container finished" podID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerID="4739ed672c6b24d9cd7d1d7d88ca98a171e3b5197eb8ace6f558f23e94774a40" exitCode=0
Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.531310 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" 
event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerDied","Data":"4739ed672c6b24d9cd7d1d7d88ca98a171e3b5197eb8ace6f558f23e94774a40"} Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.536734 5102 generic.go:334] "Generic (PLEG): container finished" podID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerID="9a6273dc9aa78886f03e9356b92e462a9a30a6150ecd7acfc56209c5988e2353" exitCode=0 Jan 23 06:56:53 crc kubenswrapper[5102]: I0123 06:56:53.536783 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerDied","Data":"9a6273dc9aa78886f03e9356b92e462a9a30a6150ecd7acfc56209c5988e2353"} Jan 23 06:56:54 crc kubenswrapper[5102]: I0123 06:56:54.707874 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.212778 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.292942 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" (UID: "6ed3af40-55c0-41a2-b5cb-21bb9c60ef67"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.293025 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir\") pod \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.293158 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access\") pod \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\" (UID: \"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67\") " Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.293469 5102 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.392887 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" (UID: "6ed3af40-55c0-41a2-b5cb-21bb9c60ef67"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.396659 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/6ed3af40-55c0-41a2-b5cb-21bb9c60ef67-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.555880 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4e918e61-37b4-4fb0-9276-07b2cfd113fb","Type":"ContainerStarted","Data":"f5258cb488feafef6e796c3f071cc6cb793ff0247d7e8a1635b2a872b91fb41b"} Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.560451 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"6ed3af40-55c0-41a2-b5cb-21bb9c60ef67","Type":"ContainerDied","Data":"6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40"} Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.560492 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6fe7121778769d0729848f3fd77b1acbe56942ec308b6ab9f927c24215ce4d40" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.560582 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.644295 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-w2xbt" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.701887 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.717390 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/a7d383f6-0729-4590-8252-46e50ea8ece8-metrics-certs\") pod \"network-metrics-daemon-rmkhl\" (UID: \"a7d383f6-0729-4590-8252-46e50ea8ece8\") " pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:55 crc kubenswrapper[5102]: I0123 06:56:55.960745 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-rmkhl" Jan 23 06:56:56 crc kubenswrapper[5102]: I0123 06:56:56.402840 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 06:56:56 crc kubenswrapper[5102]: I0123 06:56:56.403287 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 06:56:56 crc kubenswrapper[5102]: I0123 06:56:56.406631 5102 patch_prober.go:28] interesting pod/downloads-7954f5f757-n5pkw container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" start-of-body= Jan 23 06:56:56 crc kubenswrapper[5102]: I0123 06:56:56.406671 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-n5pkw" podUID="6ff670a5-d3e6-4fd5-97ad-2b07276283e8" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.15:8080/\": dial tcp 10.217.0.15:8080: connect: connection refused" Jan 23 06:56:57 crc kubenswrapper[5102]: I0123 06:56:57.731247 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-rmkhl"] Jan 23 06:56:57 crc kubenswrapper[5102]: I0123 06:56:57.839261 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:57 crc kubenswrapper[5102]: I0123 06:56:57.844073 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 06:56:58 crc kubenswrapper[5102]: I0123 06:56:58.041102 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" event={"ID":"a7d383f6-0729-4590-8252-46e50ea8ece8","Type":"ContainerStarted","Data":"aee1abe7917df229650a4727255212021ec21592a43ec77c87726059439f0856"} Jan 23 06:56:58 crc kubenswrapper[5102]: I0123 06:56:58.091923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4e918e61-37b4-4fb0-9276-07b2cfd113fb","Type":"ContainerStarted","Data":"a878f1cbb6b58be1f1d3dba027bb8efaf1275fb42f4e78b8b0e213696ef3d94d"} Jan 23 06:56:58 crc kubenswrapper[5102]: I0123 06:56:58.116178 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=5.116148639 podStartE2EDuration="5.116148639s" podCreationTimestamp="2026-01-23 06:56:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:56:58.115818399 +0000 UTC m=+168.936167374" watchObservedRunningTime="2026-01-23 06:56:58.116148639 +0000 UTC m=+168.936497614" Jan 23 06:56:59 crc kubenswrapper[5102]: I0123 06:56:59.111406 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" 
event={"ID":"a7d383f6-0729-4590-8252-46e50ea8ece8","Type":"ContainerStarted","Data":"c21acb058d13e9cc941c26dff550bfa3974cbd36141177e26528c1736541d6f8"} Jan 23 06:56:59 crc kubenswrapper[5102]: I0123 06:56:59.117973 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4e918e61-37b4-4fb0-9276-07b2cfd113fb","Type":"ContainerDied","Data":"a878f1cbb6b58be1f1d3dba027bb8efaf1275fb42f4e78b8b0e213696ef3d94d"} Jan 23 06:56:59 crc kubenswrapper[5102]: I0123 06:56:59.117963 5102 generic.go:334] "Generic (PLEG): container finished" podID="4e918e61-37b4-4fb0-9276-07b2cfd113fb" containerID="a878f1cbb6b58be1f1d3dba027bb8efaf1275fb42f4e78b8b0e213696ef3d94d" exitCode=0 Jan 23 06:57:00 crc kubenswrapper[5102]: I0123 06:57:00.139619 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-rmkhl" event={"ID":"a7d383f6-0729-4590-8252-46e50ea8ece8","Type":"ContainerStarted","Data":"39b1e63ebb9e51db67faaf56a6d9e555d0603d8f1d352b32d21acd70017fe30f"} Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.263651 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.301765 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-rmkhl" podStartSLOduration=149.301733798 podStartE2EDuration="2m29.301733798s" podCreationTimestamp="2026-01-23 06:54:32 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:57:00.167731258 +0000 UTC m=+170.988080233" watchObservedRunningTime="2026-01-23 06:57:01.301733798 +0000 UTC m=+172.122082773" Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.347821 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access\") pod \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.347950 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kubelet-dir\") pod \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\" (UID: \"4e918e61-37b4-4fb0-9276-07b2cfd113fb\") " Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.348050 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4e918e61-37b4-4fb0-9276-07b2cfd113fb" (UID: "4e918e61-37b4-4fb0-9276-07b2cfd113fb"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.348324 5102 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.382778 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4e918e61-37b4-4fb0-9276-07b2cfd113fb" (UID: "4e918e61-37b4-4fb0-9276-07b2cfd113fb"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:57:01 crc kubenswrapper[5102]: I0123 06:57:01.449187 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4e918e61-37b4-4fb0-9276-07b2cfd113fb-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:02 crc kubenswrapper[5102]: I0123 06:57:02.167955 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"4e918e61-37b4-4fb0-9276-07b2cfd113fb","Type":"ContainerDied","Data":"f5258cb488feafef6e796c3f071cc6cb793ff0247d7e8a1635b2a872b91fb41b"} Jan 23 06:57:02 crc kubenswrapper[5102]: I0123 06:57:02.168216 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f5258cb488feafef6e796c3f071cc6cb793ff0247d7e8a1635b2a872b91fb41b" Jan 23 06:57:02 crc kubenswrapper[5102]: I0123 06:57:02.168051 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Jan 23 06:57:06 crc kubenswrapper[5102]: I0123 06:57:06.421714 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-n5pkw" Jan 23 06:57:08 crc kubenswrapper[5102]: I0123 06:57:08.356695 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 06:57:15 crc kubenswrapper[5102]: I0123 06:57:15.854728 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Jan 23 06:57:16 crc kubenswrapper[5102]: I0123 06:57:16.768465 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 06:57:16 crc kubenswrapper[5102]: I0123 06:57:16.768598 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 06:57:16 crc kubenswrapper[5102]: I0123 06:57:16.828918 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-8wlt9" Jan 23 06:57:20 crc kubenswrapper[5102]: I0123 06:57:20.730389 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:57:20 crc kubenswrapper[5102]: 
I0123 06:57:20.731132 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" containerID="cri-o://cb4b4d326a1d45ed1bbf4d7721ccc2f35c41fc3d192eb751b84fcc1b2478c19c" gracePeriod=30
Jan 23 06:57:20 crc kubenswrapper[5102]: I0123 06:57:20.820312 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"]
Jan 23 06:57:20 crc kubenswrapper[5102]: I0123 06:57:20.820593 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" containerID="cri-o://9b422137ba595f63447da25ea697c65b1dc59450da109f57b7300fd4f80daa7a" gracePeriod=30
Jan 23 06:57:22 crc kubenswrapper[5102]: I0123 06:57:22.450975 5102 generic.go:334] "Generic (PLEG): container finished" podID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerID="cb4b4d326a1d45ed1bbf4d7721ccc2f35c41fc3d192eb751b84fcc1b2478c19c" exitCode=0
Jan 23 06:57:22 crc kubenswrapper[5102]: I0123 06:57:22.451086 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" event={"ID":"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3","Type":"ContainerDied","Data":"cb4b4d326a1d45ed1bbf4d7721ccc2f35c41fc3d192eb751b84fcc1b2478c19c"}
Jan 23 06:57:24 crc kubenswrapper[5102]: I0123 06:57:24.477497 5102 generic.go:334] "Generic (PLEG): container finished" podID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerID="9b422137ba595f63447da25ea697c65b1dc59450da109f57b7300fd4f80daa7a" exitCode=0
Jan 23 06:57:24 crc kubenswrapper[5102]: I0123 06:57:24.477571 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" event={"ID":"7e288133-e5c7-46c6-b29a-530d8a1c7aca","Type":"ContainerDied","Data":"9b422137ba595f63447da25ea697c65b1dc59450da109f57b7300fd4f80daa7a"}
Jan 23 06:57:26 crc kubenswrapper[5102]: I0123 06:57:26.252190 5102 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-wqtx6 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused" start-of-body=
Jan 23 06:57:26 crc kubenswrapper[5102]: I0123 06:57:26.252573 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": dial tcp 10.217.0.9:8443: connect: connection refused"
Jan 23 06:57:26 crc kubenswrapper[5102]: I0123 06:57:26.551856 5102 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lttd5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" start-of-body=
Jan 23 06:57:26 crc kubenswrapper[5102]: I0123 06:57:26.551924 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused"
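The SyncLoop DELETE above is honored by "Killing container with a grace period": the runtime delivers SIGTERM, waits up to gracePeriod=30 seconds for the process to exit (both managers above exited within a few seconds, exitCode=0), and only escalates to SIGKILL if the deadline passes. A sketch of that pattern against a local process; the helper and its parameters are assumptions, not CRI-O's implementation:

```go
package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

// killWithGrace mirrors the shutdown contract in the log: SIGTERM first, then
// SIGKILL only if the process is still alive after the grace period.
func killWithGrace(cmd *exec.Cmd, grace time.Duration) {
	_ = cmd.Process.Signal(syscall.SIGTERM)
	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()
	select {
	case <-done:
		fmt.Println("exited within grace period")
	case <-time.After(grace):
		_ = cmd.Process.Kill() // SIGKILL after gracePeriod elapses
		<-done
		fmt.Println("killed after grace period expired")
	}
}

func main() {
	cmd := exec.Command("sleep", "2")
	_ = cmd.Start()
	killWithGrace(cmd, 30*time.Second) // gracePeriod=30, as in the log
}
```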
containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": dial tcp 10.217.0.7:8443: connect: connection refused" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.725921 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 23 06:57:29 crc kubenswrapper[5102]: E0123 06:57:29.727173 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4e918e61-37b4-4fb0-9276-07b2cfd113fb" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.727203 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4e918e61-37b4-4fb0-9276-07b2cfd113fb" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: E0123 06:57:29.727245 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.727261 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.727475 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4e918e61-37b4-4fb0-9276-07b2cfd113fb" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.727510 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6ed3af40-55c0-41a2-b5cb-21bb9c60ef67" containerName="pruner" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.774569 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.774732 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.784742 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.785003 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.825563 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.825633 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.927637 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.927721 5102 reconciler_common.go:218] "operationExecutor.MountVolume started 
for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.927850 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:29 crc kubenswrapper[5102]: I0123 06:57:29.953460 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:30 crc kubenswrapper[5102]: I0123 06:57:30.120113 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:32 crc kubenswrapper[5102]: E0123 06:57:32.714041 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Jan 23 06:57:32 crc kubenswrapper[5102]: E0123 06:57:32.714686 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xrf7z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-cxz8x_openshift-marketplace(a67b60a8-bbb4-471d-a0d5-da47ec4819d2): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 06:57:32 crc kubenswrapper[5102]: E0123 06:57:32.715853 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-cxz8x" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.679664 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.682879 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.702246 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.845102 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.845189 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.845382 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.947575 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.947643 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.947694 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.947786 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.947775 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: I0123 06:57:33.970604 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access\") pod \"installer-9-crc\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:33 crc kubenswrapper[5102]: E0123 06:57:33.981688 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-cxz8x" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" Jan 23 06:57:34 crc kubenswrapper[5102]: I0123 06:57:34.016080 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:57:34 crc kubenswrapper[5102]: E0123 06:57:34.048950 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Jan 23 06:57:34 crc kubenswrapper[5102]: E0123 06:57:34.049226 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-h4gx5,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-7h5pc_openshift-marketplace(f7bf48e9-2a76-4bc6-ab13-0fe41c736aae): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 06:57:34 crc kubenswrapper[5102]: E0123 06:57:34.050474 5102 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-7h5pc" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" Jan 23 06:57:37 crc kubenswrapper[5102]: I0123 06:57:37.253443 5102 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-wqtx6 container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 06:57:37 crc kubenswrapper[5102]: I0123 06:57:37.254135 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.9:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 06:57:37 crc kubenswrapper[5102]: I0123 06:57:37.551583 5102 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-lttd5 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 06:57:37 crc kubenswrapper[5102]: I0123 06:57:37.551718 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.7:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 06:57:38 crc kubenswrapper[5102]: E0123 06:57:38.556173 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-7h5pc" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" Jan 23 06:57:38 crc kubenswrapper[5102]: E0123 06:57:38.627298 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Jan 23 06:57:38 crc kubenswrapper[5102]: E0123 06:57:38.627571 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-g8r4x,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-8hw9t_openshift-marketplace(1b442d58-fe80-472a-a33e-ec4e15eadd8c): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Jan 23 06:57:38 crc kubenswrapper[5102]: E0123 06:57:38.628859 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-8hw9t" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.147353 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-8hw9t" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.271347 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.271963 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.273652 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.273902 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sl6q6" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d"
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.282824 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.356481 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.356646 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-2rzfz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-nwnmg_openshift-marketplace(45d96a0d-f31b-4afb-a528-58d7bbe8fe1b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.358028 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-nwnmg" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.358726 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.358812 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-gr9qr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-lwmjx_openshift-marketplace(599dad18-1d4a-415a-9fab-c8a5ea7521ed): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.360534 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-lwmjx" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.397709 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.398179 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-jxphv,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-b5948_openshift-marketplace(0b589483-946f-4931-8bae-7b38e37682b3): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.399508 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-b5948" podUID="0b589483-946f-4931-8bae-7b38e37682b3"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.421899 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.422058 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ggqvw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-86qsn_openshift-marketplace(63c37b81-081b-4a4c-b448-26e58f97493b): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError"
Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.423211 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-86qsn" podUID="63c37b81-081b-4a4c-b448-26e58f97493b"
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.430906 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config\") pod \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432256 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config" (OuterVolumeSpecName: "config") pod "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" (UID: "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432409 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca\") pod \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432565 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles\") pod \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432647 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config\") pod \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432695 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert\") pod \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432752 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gxgf7\" (UniqueName: \"kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7\") pod \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432790 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-88h58\" (UniqueName: \"kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58\") pod \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.432844 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert\") pod \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\" (UID: \"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3\") "
Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433213 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca" (OuterVolumeSpecName: "client-ca") pod "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" (UID: "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433293 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca\") pod \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\" (UID: \"7e288133-e5c7-46c6-b29a-530d8a1c7aca\") " Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433512 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config" (OuterVolumeSpecName: "config") pod "7e288133-e5c7-46c6-b29a-530d8a1c7aca" (UID: "7e288133-e5c7-46c6-b29a-530d8a1c7aca"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433615 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433656 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.433667 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.434132 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca" (OuterVolumeSpecName: "client-ca") pod "7e288133-e5c7-46c6-b29a-530d8a1c7aca" (UID: "7e288133-e5c7-46c6-b29a-530d8a1c7aca"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.434567 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" (UID: "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.442254 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7" (OuterVolumeSpecName: "kube-api-access-gxgf7") pod "7e288133-e5c7-46c6-b29a-530d8a1c7aca" (UID: "7e288133-e5c7-46c6-b29a-530d8a1c7aca"). InnerVolumeSpecName "kube-api-access-gxgf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.443329 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7e288133-e5c7-46c6-b29a-530d8a1c7aca" (UID: "7e288133-e5c7-46c6-b29a-530d8a1c7aca"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.444468 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" (UID: "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.444564 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58" (OuterVolumeSpecName: "kube-api-access-88h58") pod "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" (UID: "f97b20e0-a9b8-4aaa-a61f-33a195eb11a3"). InnerVolumeSpecName "kube-api-access-88h58". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.507824 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.534998 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.535033 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7e288133-e5c7-46c6-b29a-530d8a1c7aca-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.535042 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gxgf7\" (UniqueName: \"kubernetes.io/projected/7e288133-e5c7-46c6-b29a-530d8a1c7aca-kube-api-access-gxgf7\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.535054 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-88h58\" (UniqueName: \"kubernetes.io/projected/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-kube-api-access-88h58\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.535063 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.535072 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7e288133-e5c7-46c6-b29a-530d8a1c7aca-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.557575 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.681818 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" event={"ID":"f97b20e0-a9b8-4aaa-a61f-33a195eb11a3","Type":"ContainerDied","Data":"743bd99929be615ef920bab053356d055d6716e2be7165f265e73e4db132f35e"} Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.681882 5102 scope.go:117] "RemoveContainer" containerID="cb4b4d326a1d45ed1bbf4d7721ccc2f35c41fc3d192eb751b84fcc1b2478c19c" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.682015 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-wqtx6" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.685156 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c50c373e-478c-4132-9fe4-883f61e7e308","Type":"ContainerStarted","Data":"e848f0c388bcb167d8e2bac8bc54424002f199ec44181408ecd0ed275e7777bb"} Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.688675 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d4fd1abf-1877-484e-88c6-e4139db1c7b3","Type":"ContainerStarted","Data":"fef8f6a5fb94eda4744d41e26e7ccb923120290dc4c504c905cd0e7ae50b1ffe"} Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.690921 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" event={"ID":"7e288133-e5c7-46c6-b29a-530d8a1c7aca","Type":"ContainerDied","Data":"3a82978a969128ba2fcad518b0d175d27a59c372b27011178704b6a7af8d7459"} Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.691698 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.693925 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-lwmjx" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.693952 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-b5948" podUID="0b589483-946f-4931-8bae-7b38e37682b3" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.699134 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sl6q6" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.706002 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-86qsn" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" Jan 23 06:57:40 crc kubenswrapper[5102]: E0123 06:57:40.707023 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-nwnmg" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.714579 5102 scope.go:117] "RemoveContainer" containerID="9b422137ba595f63447da25ea697c65b1dc59450da109f57b7300fd4f80daa7a" Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.801743 5102 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"] Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.805954 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-lttd5"] Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.813654 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:57:40 crc kubenswrapper[5102]: I0123 06:57:40.818365 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-wqtx6"] Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.609152 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" path="/var/lib/kubelet/pods/7e288133-e5c7-46c6-b29a-530d8a1c7aca/volumes" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.609948 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" path="/var/lib/kubelet/pods/f97b20e0-a9b8-4aaa-a61f-33a195eb11a3/volumes" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.702368 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c50c373e-478c-4132-9fe4-883f61e7e308","Type":"ContainerStarted","Data":"5194b76da2edc01c2a826f50a8879b06791472b21982a285a55059f05363d2c7"} Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.703923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d4fd1abf-1877-484e-88c6-e4139db1c7b3","Type":"ContainerStarted","Data":"c87082c1360bda953d9a05442df6b51bf4b97a29913f0faceedbae65cb87d514"} Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.718881 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=8.718851058 podStartE2EDuration="8.718851058s" podCreationTimestamp="2026-01-23 06:57:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:57:41.717924787 +0000 UTC m=+212.538273762" watchObservedRunningTime="2026-01-23 06:57:41.718851058 +0000 UTC m=+212.539200033" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.741108 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-9-crc" podStartSLOduration=12.741087434 podStartE2EDuration="12.741087434s" podCreationTimestamp="2026-01-23 06:57:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:57:41.737467484 +0000 UTC m=+212.557816459" watchObservedRunningTime="2026-01-23 06:57:41.741087434 +0000 UTC m=+212.561436409" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.814497 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:57:41 crc kubenswrapper[5102]: E0123 06:57:41.814886 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.814906 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" 
containerName="controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: E0123 06:57:41.814918 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.814925 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.815026 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="7e288133-e5c7-46c6-b29a-530d8a1c7aca" containerName="route-controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.815042 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f97b20e0-a9b8-4aaa-a61f-33a195eb11a3" containerName="controller-manager" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.815748 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.820346 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.821379 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.821621 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.822012 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.822225 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.823620 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.827780 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.828935 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.836200 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.836498 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.837068 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.837291 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.840394 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.840662 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 23 06:57:41 crc kubenswrapper[5102]: I0123 06:57:41.840864 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.124386 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.131934 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.226567 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227503 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227563 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qx2jc\" (UniqueName: \"kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227651 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227689 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm66f\" (UniqueName: \"kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227716 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:42 crc 
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.227967 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.228053 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329699 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329788 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329866 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329906 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qx2jc\" (UniqueName: \"kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329936 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.329969 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm66f\" (UniqueName: \"kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.331532 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.331567 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.331983 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-client-ca\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.332038 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.333913 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.333995 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.334535 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-client-ca\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.335132 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.343247 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.346605 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.347405 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qx2jc\" (UniqueName: \"kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc\") pod \"controller-manager-6484fcb57b-bhzfs\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.350130 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm66f\" (UniqueName: \"kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f\") pod \"route-controller-manager-5fb6b7b88-7q8xx\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.438635 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"
Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.441593 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.711212 5102 generic.go:334] "Generic (PLEG): container finished" podID="d4fd1abf-1877-484e-88c6-e4139db1c7b3" containerID="c87082c1360bda953d9a05442df6b51bf4b97a29913f0faceedbae65cb87d514" exitCode=0 Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.712222 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d4fd1abf-1877-484e-88c6-e4139db1c7b3","Type":"ContainerDied","Data":"c87082c1360bda953d9a05442df6b51bf4b97a29913f0faceedbae65cb87d514"} Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.866436 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:57:42 crc kubenswrapper[5102]: I0123 06:57:42.941422 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:57:42 crc kubenswrapper[5102]: W0123 06:57:42.944209 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9bafd5e1_1b88_4286_bab9_fbf3c3009dad.slice/crio-5cabbbebb4e7aca0f80742a1549646da9d06618e1b409670397004ef6206d039 WatchSource:0}: Error finding container 5cabbbebb4e7aca0f80742a1549646da9d06618e1b409670397004ef6206d039: Status 404 returned error can't find the container with id 5cabbbebb4e7aca0f80742a1549646da9d06618e1b409670397004ef6206d039 Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.717063 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" event={"ID":"9bafd5e1-1b88-4286-bab9-fbf3c3009dad","Type":"ContainerStarted","Data":"0d57e89faa95a234ddb48de31114e704bdb05ee13edd3baee9486700a3eb4a65"} Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.717439 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" event={"ID":"9bafd5e1-1b88-4286-bab9-fbf3c3009dad","Type":"ContainerStarted","Data":"5cabbbebb4e7aca0f80742a1549646da9d06618e1b409670397004ef6206d039"} Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.718806 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.721259 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" event={"ID":"8f135d4c-6abc-470e-a800-189cc1f887d1","Type":"ContainerStarted","Data":"9e5257ecba0e09d2f4797402c55913eccfc542d100aa4ca36e0361555357f012"} Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.721288 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.721302 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" event={"ID":"8f135d4c-6abc-470e-a800-189cc1f887d1","Type":"ContainerStarted","Data":"bbd1a7ee840d2c85bbd59ab292d9134ebe3a7eb8701ac1b12a30d39b09a00d06"} Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.784508 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" podStartSLOduration=3.784488472 podStartE2EDuration="3.784488472s" podCreationTimestamp="2026-01-23 06:57:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:57:43.78411674 +0000 UTC m=+214.604465715" watchObservedRunningTime="2026-01-23 06:57:43.784488472 +0000 UTC m=+214.604837457" Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.792122 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.809128 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" podStartSLOduration=3.809106887 podStartE2EDuration="3.809106887s" podCreationTimestamp="2026-01-23 06:57:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:57:43.808938821 +0000 UTC m=+214.629287806" watchObservedRunningTime="2026-01-23 06:57:43.809106887 +0000 UTC m=+214.629455862" Jan 23 06:57:43 crc kubenswrapper[5102]: I0123 06:57:43.975807 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.133953 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.264257 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access\") pod \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.264325 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir\") pod \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\" (UID: \"d4fd1abf-1877-484e-88c6-e4139db1c7b3\") " Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.264589 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d4fd1abf-1877-484e-88c6-e4139db1c7b3" (UID: "d4fd1abf-1877-484e-88c6-e4139db1c7b3"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.273146 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d4fd1abf-1877-484e-88c6-e4139db1c7b3" (UID: "d4fd1abf-1877-484e-88c6-e4139db1c7b3"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.365274 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.365317 5102 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d4fd1abf-1877-484e-88c6-e4139db1c7b3-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.745339 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"d4fd1abf-1877-484e-88c6-e4139db1c7b3","Type":"ContainerDied","Data":"fef8f6a5fb94eda4744d41e26e7ccb923120290dc4c504c905cd0e7ae50b1ffe"} Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.745407 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Jan 23 06:57:44 crc kubenswrapper[5102]: I0123 06:57:44.745755 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fef8f6a5fb94eda4744d41e26e7ccb923120290dc4c504c905cd0e7ae50b1ffe" Jan 23 06:57:46 crc kubenswrapper[5102]: I0123 06:57:46.768503 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 06:57:46 crc kubenswrapper[5102]: I0123 06:57:46.768582 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 06:57:46 crc kubenswrapper[5102]: I0123 06:57:46.768635 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 06:57:46 crc kubenswrapper[5102]: I0123 06:57:46.769221 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 06:57:46 crc kubenswrapper[5102]: I0123 06:57:46.769321 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b" gracePeriod=600 Jan 23 06:57:47 crc kubenswrapper[5102]: I0123 06:57:47.763923 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b" exitCode=0 Jan 23 06:57:47 crc kubenswrapper[5102]: I0123 06:57:47.764598 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b"} Jan 23 06:57:47 crc kubenswrapper[5102]: I0123 06:57:47.764637 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c"} Jan 23 06:57:48 crc kubenswrapper[5102]: I0123 06:57:48.773255 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerStarted","Data":"f1c856083c73905fa667ee03d049531c9b1d7f1afb866b4cdbc6eb9abc8375fb"} Jan 23 06:57:49 crc kubenswrapper[5102]: I0123 06:57:49.781325 5102 generic.go:334] "Generic (PLEG): container finished" podID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerID="f1c856083c73905fa667ee03d049531c9b1d7f1afb866b4cdbc6eb9abc8375fb" exitCode=0 Jan 23 06:57:49 crc kubenswrapper[5102]: I0123 06:57:49.781351 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerDied","Data":"f1c856083c73905fa667ee03d049531c9b1d7f1afb866b4cdbc6eb9abc8375fb"} Jan 23 06:57:50 crc kubenswrapper[5102]: I0123 06:57:50.793274 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerStarted","Data":"08e689498065ace79853c92cc7afbf093dbc4defdc07b884075403b75ccdaf54"} Jan 23 06:57:50 crc kubenswrapper[5102]: I0123 06:57:50.817646 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-cxz8x" podStartSLOduration=3.950923083 podStartE2EDuration="1m3.817623088s" podCreationTimestamp="2026-01-23 06:56:47 +0000 UTC" firstStartedPulling="2026-01-23 06:56:50.342694633 +0000 UTC m=+161.163043608" lastFinishedPulling="2026-01-23 06:57:50.209394628 +0000 UTC m=+221.029743613" observedRunningTime="2026-01-23 06:57:50.81558559 +0000 UTC m=+221.635934585" watchObservedRunningTime="2026-01-23 06:57:50.817623088 +0000 UTC m=+221.637972063" Jan 23 06:57:53 crc kubenswrapper[5102]: I0123 06:57:53.815293 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerStarted","Data":"729d97cae4af50e12746d889151cb1241fc3f64240c4d45f11c648ee0684ac8a"} Jan 23 06:57:53 crc kubenswrapper[5102]: I0123 06:57:53.817201 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerStarted","Data":"5ebe91b262b728417cd5ad3066732603c880403bed578c19e0ff904a212dc9fb"} Jan 23 06:57:54 crc kubenswrapper[5102]: I0123 06:57:54.826515 5102 generic.go:334] "Generic (PLEG): container finished" podID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerID="5ebe91b262b728417cd5ad3066732603c880403bed578c19e0ff904a212dc9fb" exitCode=0 Jan 23 06:57:54 crc kubenswrapper[5102]: I0123 06:57:54.826769 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" 
event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerDied","Data":"5ebe91b262b728417cd5ad3066732603c880403bed578c19e0ff904a212dc9fb"} Jan 23 06:57:54 crc kubenswrapper[5102]: I0123 06:57:54.828935 5102 generic.go:334] "Generic (PLEG): container finished" podID="63c37b81-081b-4a4c-b448-26e58f97493b" containerID="729d97cae4af50e12746d889151cb1241fc3f64240c4d45f11c648ee0684ac8a" exitCode=0 Jan 23 06:57:54 crc kubenswrapper[5102]: I0123 06:57:54.829004 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerDied","Data":"729d97cae4af50e12746d889151cb1241fc3f64240c4d45f11c648ee0684ac8a"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.837010 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerStarted","Data":"24ac4692236247f5e39baaba7e80017233e482ac79c99c3b223c7b05cbedf074"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.839505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerStarted","Data":"9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.841032 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerStarted","Data":"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.842661 5102 generic.go:334] "Generic (PLEG): container finished" podID="0b589483-946f-4931-8bae-7b38e37682b3" containerID="84c2eea1725ca19fdc240f5aab958c4410dba652b4566603f75e4e9e40697d0a" exitCode=0 Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.842719 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerDied","Data":"84c2eea1725ca19fdc240f5aab958c4410dba652b4566603f75e4e9e40697d0a"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.846082 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerStarted","Data":"643df5be6247c72b3d54aa4e4797bde5b4c2ef47beb92635169f61b0c0293eeb"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.856020 5102 generic.go:334] "Generic (PLEG): container finished" podID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerID="8b8cd6b44af39aea24a42e6a8e7de7f8da063c03bbff3114a89bd075a14ab0aa" exitCode=0 Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.856062 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerDied","Data":"8b8cd6b44af39aea24a42e6a8e7de7f8da063c03bbff3114a89bd075a14ab0aa"} Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.953681 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nwnmg" podStartSLOduration=3.042844467 podStartE2EDuration="1m5.953662888s" podCreationTimestamp="2026-01-23 06:56:50 +0000 UTC" firstStartedPulling="2026-01-23 06:56:52.423763358 +0000 UTC 
m=+163.244112333" lastFinishedPulling="2026-01-23 06:57:55.334581779 +0000 UTC m=+226.154930754" observedRunningTime="2026-01-23 06:57:55.950447182 +0000 UTC m=+226.770796157" watchObservedRunningTime="2026-01-23 06:57:55.953662888 +0000 UTC m=+226.774011863" Jan 23 06:57:55 crc kubenswrapper[5102]: I0123 06:57:55.977124 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-86qsn" podStartSLOduration=5.062055057 podStartE2EDuration="1m8.977090394s" podCreationTimestamp="2026-01-23 06:56:47 +0000 UTC" firstStartedPulling="2026-01-23 06:56:51.365420941 +0000 UTC m=+162.185769916" lastFinishedPulling="2026-01-23 06:57:55.280456278 +0000 UTC m=+226.100805253" observedRunningTime="2026-01-23 06:57:55.974808038 +0000 UTC m=+226.795157013" watchObservedRunningTime="2026-01-23 06:57:55.977090394 +0000 UTC m=+226.797439389" Jan 23 06:57:56 crc kubenswrapper[5102]: I0123 06:57:56.865620 5102 generic.go:334] "Generic (PLEG): container finished" podID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerID="e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3" exitCode=0 Jan 23 06:57:56 crc kubenswrapper[5102]: I0123 06:57:56.865701 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerDied","Data":"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3"} Jan 23 06:57:56 crc kubenswrapper[5102]: I0123 06:57:56.869339 5102 generic.go:334] "Generic (PLEG): container finished" podID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerID="24ac4692236247f5e39baaba7e80017233e482ac79c99c3b223c7b05cbedf074" exitCode=0 Jan 23 06:57:56 crc kubenswrapper[5102]: I0123 06:57:56.870041 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerDied","Data":"24ac4692236247f5e39baaba7e80017233e482ac79c99c3b223c7b05cbedf074"} Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.280787 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.281301 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.407630 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.407718 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.886160 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerStarted","Data":"f6bf7c24330303cd409517ae9e3176fc3be3ed28aa52413612bf288c1abec462"} Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.893418 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerStarted","Data":"f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f"} Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.895156 5102 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerStarted","Data":"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15"} Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.909875 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b5948" podStartSLOduration=4.404486314 podStartE2EDuration="1m9.909850965s" podCreationTimestamp="2026-01-23 06:56:49 +0000 UTC" firstStartedPulling="2026-01-23 06:56:52.417114177 +0000 UTC m=+163.237463152" lastFinishedPulling="2026-01-23 06:57:57.922478828 +0000 UTC m=+228.742827803" observedRunningTime="2026-01-23 06:57:58.906592737 +0000 UTC m=+229.726941712" watchObservedRunningTime="2026-01-23 06:57:58.909850965 +0000 UTC m=+229.730199940" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.943598 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7h5pc" podStartSLOduration=4.815306726 podStartE2EDuration="1m8.943574081s" podCreationTimestamp="2026-01-23 06:56:50 +0000 UTC" firstStartedPulling="2026-01-23 06:56:53.53610359 +0000 UTC m=+164.356452575" lastFinishedPulling="2026-01-23 06:57:57.664370955 +0000 UTC m=+228.484719930" observedRunningTime="2026-01-23 06:57:58.940362645 +0000 UTC m=+229.760711620" watchObservedRunningTime="2026-01-23 06:57:58.943574081 +0000 UTC m=+229.763923056" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.953079 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:57:58 crc kubenswrapper[5102]: I0123 06:57:58.965335 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.017683 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.902472 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerStarted","Data":"351c3231f89bda4286b4781d1b0a9ab78ec735edb17cce7b2b859732346b13ee"} Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.905830 5102 generic.go:334] "Generic (PLEG): container finished" podID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerID="0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15" exitCode=0 Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.909361 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerDied","Data":"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15"} Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.909434 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerStarted","Data":"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f"} Jan 23 06:57:59 crc kubenswrapper[5102]: I0123 06:57:59.923421 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-8hw9t" podStartSLOduration=5.295497778 podStartE2EDuration="1m9.923400329s" 
podCreationTimestamp="2026-01-23 06:56:50 +0000 UTC" firstStartedPulling="2026-01-23 06:56:53.538709697 +0000 UTC m=+164.359058672" lastFinishedPulling="2026-01-23 06:57:58.166612248 +0000 UTC m=+228.986961223" observedRunningTime="2026-01-23 06:57:59.920760612 +0000 UTC m=+230.741109587" watchObservedRunningTime="2026-01-23 06:57:59.923400329 +0000 UTC m=+230.743749294" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.257404 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-lwmjx" podStartSLOduration=6.338072078 podStartE2EDuration="1m13.257375943s" podCreationTimestamp="2026-01-23 06:56:47 +0000 UTC" firstStartedPulling="2026-01-23 06:56:51.368587366 +0000 UTC m=+162.188936341" lastFinishedPulling="2026-01-23 06:57:58.287891231 +0000 UTC m=+229.108240206" observedRunningTime="2026-01-23 06:57:59.972896847 +0000 UTC m=+230.793245822" watchObservedRunningTime="2026-01-23 06:58:00.257375943 +0000 UTC m=+231.077724918" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.260278 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.260631 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerName="controller-manager" containerID="cri-o://0d57e89faa95a234ddb48de31114e704bdb05ee13edd3baee9486700a3eb4a65" gracePeriod=30 Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.335339 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.335912 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerName="route-controller-manager" containerID="cri-o://9e5257ecba0e09d2f4797402c55913eccfc542d100aa4ca36e0361555357f012" gracePeriod=30 Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.371570 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.371784 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.514724 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.514802 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:58:00 crc kubenswrapper[5102]: I0123 06:58:00.579294 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.126593 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.126662 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:58:01 crc 
kubenswrapper[5102]: I0123 06:58:01.166470 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.166595 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.419624 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-marketplace-b5948" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:01 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:01 crc kubenswrapper[5102]: > Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.933049 5102 generic.go:334] "Generic (PLEG): container finished" podID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerID="9e5257ecba0e09d2f4797402c55913eccfc542d100aa4ca36e0361555357f012" exitCode=0 Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.933174 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" event={"ID":"8f135d4c-6abc-470e-a800-189cc1f887d1","Type":"ContainerDied","Data":"9e5257ecba0e09d2f4797402c55913eccfc542d100aa4ca36e0361555357f012"} Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.936001 5102 generic.go:334] "Generic (PLEG): container finished" podID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerID="0d57e89faa95a234ddb48de31114e704bdb05ee13edd3baee9486700a3eb4a65" exitCode=0 Jan 23 06:58:01 crc kubenswrapper[5102]: I0123 06:58:01.936055 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" event={"ID":"9bafd5e1-1b88-4286-bab9-fbf3c3009dad","Type":"ContainerDied","Data":"0d57e89faa95a234ddb48de31114e704bdb05ee13edd3baee9486700a3eb4a65"} Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.165518 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8hw9t" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:02 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:02 crc kubenswrapper[5102]: > Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.216835 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nwnmg" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:02 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:02 crc kubenswrapper[5102]: > Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.445530 5102 patch_prober.go:28] interesting pod/route-controller-manager-5fb6b7b88-7q8xx container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 10.217.0.57:8443: connect: connection refused" start-of-body= Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.445667 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.57:8443/healthz\": dial tcp 
10.217.0.57:8443: connect: connection refused" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.776260 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.840302 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config\") pod \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.840431 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert\") pod \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.840498 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles\") pod \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.840562 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qx2jc\" (UniqueName: \"kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc\") pod \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.840666 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-client-ca\") pod \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\" (UID: \"9bafd5e1-1b88-4286-bab9-fbf3c3009dad\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.841749 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-client-ca" (OuterVolumeSpecName: "client-ca") pod "9bafd5e1-1b88-4286-bab9-fbf3c3009dad" (UID: "9bafd5e1-1b88-4286-bab9-fbf3c3009dad"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.841815 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config" (OuterVolumeSpecName: "config") pod "9bafd5e1-1b88-4286-bab9-fbf3c3009dad" (UID: "9bafd5e1-1b88-4286-bab9-fbf3c3009dad"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842085 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "9bafd5e1-1b88-4286-bab9-fbf3c3009dad" (UID: "9bafd5e1-1b88-4286-bab9-fbf3c3009dad"). InnerVolumeSpecName "proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842133 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"] Jan 23 06:58:02 crc kubenswrapper[5102]: E0123 06:58:02.842433 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerName="controller-manager" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842451 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerName="controller-manager" Jan 23 06:58:02 crc kubenswrapper[5102]: E0123 06:58:02.842482 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d4fd1abf-1877-484e-88c6-e4139db1c7b3" containerName="pruner" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842488 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d4fd1abf-1877-484e-88c6-e4139db1c7b3" containerName="pruner" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842655 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d4fd1abf-1877-484e-88c6-e4139db1c7b3" containerName="pruner" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.842671 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerName="controller-manager" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.843429 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.848918 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc" (OuterVolumeSpecName: "kube-api-access-qx2jc") pod "9bafd5e1-1b88-4286-bab9-fbf3c3009dad" (UID: "9bafd5e1-1b88-4286-bab9-fbf3c3009dad"). InnerVolumeSpecName "kube-api-access-qx2jc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.851409 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"] Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.859982 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9bafd5e1-1b88-4286-bab9-fbf3c3009dad" (UID: "9bafd5e1-1b88-4286-bab9-fbf3c3009dad"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.923362 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.941768 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config\") pod \"8f135d4c-6abc-470e-a800-189cc1f887d1\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.941910 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert\") pod \"8f135d4c-6abc-470e-a800-189cc1f887d1\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.941998 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jm66f\" (UniqueName: \"kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f\") pod \"8f135d4c-6abc-470e-a800-189cc1f887d1\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942069 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca\") pod \"8f135d4c-6abc-470e-a800-189cc1f887d1\" (UID: \"8f135d4c-6abc-470e-a800-189cc1f887d1\") " Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942390 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942502 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942577 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pf9t\" (UniqueName: \"kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942703 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942807 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca\") pod 
\"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942899 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942936 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942962 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.942985 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qx2jc\" (UniqueName: \"kubernetes.io/projected/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-kube-api-access-qx2jc\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.943006 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/9bafd5e1-1b88-4286-bab9-fbf3c3009dad-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.943912 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca" (OuterVolumeSpecName: "client-ca") pod "8f135d4c-6abc-470e-a800-189cc1f887d1" (UID: "8f135d4c-6abc-470e-a800-189cc1f887d1"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.945058 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config" (OuterVolumeSpecName: "config") pod "8f135d4c-6abc-470e-a800-189cc1f887d1" (UID: "8f135d4c-6abc-470e-a800-189cc1f887d1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.955730 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8f135d4c-6abc-470e-a800-189cc1f887d1" (UID: "8f135d4c-6abc-470e-a800-189cc1f887d1"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.955738 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f" (OuterVolumeSpecName: "kube-api-access-jm66f") pod "8f135d4c-6abc-470e-a800-189cc1f887d1" (UID: "8f135d4c-6abc-470e-a800-189cc1f887d1"). InnerVolumeSpecName "kube-api-access-jm66f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.958007 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" event={"ID":"9bafd5e1-1b88-4286-bab9-fbf3c3009dad","Type":"ContainerDied","Data":"5cabbbebb4e7aca0f80742a1549646da9d06618e1b409670397004ef6206d039"} Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.958060 5102 scope.go:117] "RemoveContainer" containerID="0d57e89faa95a234ddb48de31114e704bdb05ee13edd3baee9486700a3eb4a65" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.958199 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.962687 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" event={"ID":"8f135d4c-6abc-470e-a800-189cc1f887d1","Type":"ContainerDied","Data":"bbd1a7ee840d2c85bbd59ab292d9134ebe3a7eb8701ac1b12a30d39b09a00d06"} Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.962767 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.980918 5102 scope.go:117] "RemoveContainer" containerID="9e5257ecba0e09d2f4797402c55913eccfc542d100aa4ca36e0361555357f012" Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.991449 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:58:02 crc kubenswrapper[5102]: I0123 06:58:02.997160 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-6484fcb57b-bhzfs"] Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.017786 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.021354 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-5fb6b7b88-7q8xx"] Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045010 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045105 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045148 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " 
pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045195 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045224 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pf9t\" (UniqueName: \"kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045306 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045321 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f135d4c-6abc-470e-a800-189cc1f887d1-config\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045336 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f135d4c-6abc-470e-a800-189cc1f887d1-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.045348 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jm66f\" (UniqueName: \"kubernetes.io/projected/8f135d4c-6abc-470e-a800-189cc1f887d1-kube-api-access-jm66f\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.047188 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.047649 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.048181 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.050350 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " 
pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.064382 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pf9t\" (UniqueName: \"kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t\") pod \"controller-manager-649fffcbbd-mbdll\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") " pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.221136 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.439737 5102 patch_prober.go:28] interesting pod/controller-manager-6484fcb57b-bhzfs container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.440337 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-6484fcb57b-bhzfs" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.58:8443/healthz\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.615511 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" path="/var/lib/kubelet/pods/8f135d4c-6abc-470e-a800-189cc1f887d1/volumes" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.616476 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9bafd5e1-1b88-4286-bab9-fbf3c3009dad" path="/var/lib/kubelet/pods/9bafd5e1-1b88-4286-bab9-fbf3c3009dad/volumes" Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.667046 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"] Jan 23 06:58:03 crc kubenswrapper[5102]: W0123 06:58:03.675989 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07353947_651e_406f_8737_5ff7a2ef9cba.slice/crio-3092b7ab962b19865a9700be9a70dba415bf558ba31f438598ca5079e2411ffd WatchSource:0}: Error finding container 3092b7ab962b19865a9700be9a70dba415bf558ba31f438598ca5079e2411ffd: Status 404 returned error can't find the container with id 3092b7ab962b19865a9700be9a70dba415bf558ba31f438598ca5079e2411ffd Jan 23 06:58:03 crc kubenswrapper[5102]: I0123 06:58:03.973433 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" event={"ID":"07353947-651e-406f-8737-5ff7a2ef9cba","Type":"ContainerStarted","Data":"3092b7ab962b19865a9700be9a70dba415bf558ba31f438598ca5079e2411ffd"} Jan 23 06:58:04 crc kubenswrapper[5102]: I0123 06:58:04.983747 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" event={"ID":"07353947-651e-406f-8737-5ff7a2ef9cba","Type":"ContainerStarted","Data":"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"} Jan 23 06:58:04 crc kubenswrapper[5102]: I0123 06:58:04.984590 5102 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.012637 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" podStartSLOduration=5.01260606 podStartE2EDuration="5.01260606s" podCreationTimestamp="2026-01-23 06:58:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:58:05.006782407 +0000 UTC m=+235.827131392" watchObservedRunningTime="2026-01-23 06:58:05.01260606 +0000 UTC m=+235.832955035" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.048142 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"] Jan 23 06:58:05 crc kubenswrapper[5102]: E0123 06:58:05.048368 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerName="route-controller-manager" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.048382 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerName="route-controller-manager" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.048482 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="8f135d4c-6abc-470e-a800-189cc1f887d1" containerName="route-controller-manager" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.048851 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.050743 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.051233 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.051301 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.051413 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.053258 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.054126 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.061479 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"] Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.081306 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " 
pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.081392 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.082170 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.082207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jvx9j\" (UniqueName: \"kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.105915 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.184577 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.185393 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.185451 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jvx9j\" (UniqueName: \"kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.185591 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.186321 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.187211 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.242668 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.279109 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jvx9j\" (UniqueName: \"kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j\") pod \"route-controller-manager-7df6fd7b5b-fl6wk\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") " pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:05 crc kubenswrapper[5102]: I0123 06:58:05.390088 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:06 crc kubenswrapper[5102]: I0123 06:58:06.104642 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"] Jan 23 06:58:07 crc kubenswrapper[5102]: I0123 06:58:07.002602 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" event={"ID":"a1bb46fe-01d2-426a-977c-057b7efbc8c0","Type":"ContainerStarted","Data":"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"} Jan 23 06:58:07 crc kubenswrapper[5102]: I0123 06:58:07.003141 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" event={"ID":"a1bb46fe-01d2-426a-977c-057b7efbc8c0","Type":"ContainerStarted","Data":"34de7c971e316227a662b88f3d86bbed9c18a1fb1cd0ebae195496c1404a0ff0"} Jan 23 06:58:07 crc kubenswrapper[5102]: I0123 06:58:07.006279 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerStarted","Data":"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf"} Jan 23 06:58:07 crc kubenswrapper[5102]: I0123 06:58:07.064517 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sl6q6" podStartSLOduration=4.6349573809999995 podStartE2EDuration="1m20.064481748s" podCreationTimestamp="2026-01-23 06:56:47 +0000 UTC" firstStartedPulling="2026-01-23 06:56:50.353117448 +0000 UTC m=+161.173466423" lastFinishedPulling="2026-01-23 06:58:05.782641815 +0000 UTC m=+236.602990790" observedRunningTime="2026-01-23 06:58:07.053011368 +0000 UTC 
m=+237.873360343" watchObservedRunningTime="2026-01-23 06:58:07.064481748 +0000 UTC m=+237.884830723" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.011508 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.020348 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.024435 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.024502 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sl6q6" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.039481 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" podStartSLOduration=8.039458006 podStartE2EDuration="8.039458006s" podCreationTimestamp="2026-01-23 06:58:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:58:08.034697588 +0000 UTC m=+238.855046563" watchObservedRunningTime="2026-01-23 06:58:08.039458006 +0000 UTC m=+238.859806981" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.286552 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.286966 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.493364 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:58:08 crc kubenswrapper[5102]: I0123 06:58:08.503507 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:58:09 crc kubenswrapper[5102]: I0123 06:58:09.084230 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:58:09 crc kubenswrapper[5102]: I0123 06:58:09.276942 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/certified-operators-sl6q6" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:09 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:09 crc kubenswrapper[5102]: > Jan 23 06:58:10 crc kubenswrapper[5102]: I0123 06:58:10.420743 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:58:10 crc kubenswrapper[5102]: I0123 06:58:10.435292 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-86qsn"] Jan 23 06:58:10 crc kubenswrapper[5102]: I0123 06:58:10.435674 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-86qsn" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="registry-server" 
containerID="cri-o://9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c" gracePeriod=2 Jan 23 06:58:10 crc kubenswrapper[5102]: I0123 06:58:10.472864 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b5948" Jan 23 06:58:11 crc kubenswrapper[5102]: I0123 06:58:11.215401 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:58:11 crc kubenswrapper[5102]: I0123 06:58:11.255691 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:58:11 crc kubenswrapper[5102]: I0123 06:58:11.431998 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:58:11 crc kubenswrapper[5102]: I0123 06:58:11.563916 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-7h5pc" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:11 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:11 crc kubenswrapper[5102]: > Jan 23 06:58:12 crc kubenswrapper[5102]: I0123 06:58:12.045181 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-lwmjx" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="registry-server" containerID="cri-o://8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f" gracePeriod=2 Jan 23 06:58:12 crc kubenswrapper[5102]: I0123 06:58:12.168297 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-8hw9t" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" probeResult="failure" output=< Jan 23 06:58:12 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 06:58:12 crc kubenswrapper[5102]: > Jan 23 06:58:12 crc kubenswrapper[5102]: E0123 06:58:12.647922 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod63c37b81_081b_4a4c_b448_26e58f97493b.slice/crio-conmon-9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod599dad18_1d4a_415a_9fab_c8a5ea7521ed.slice/crio-8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f.scope\": RecentStats: unable to find data in memory cache]" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.032939 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lwmjx"
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.062785 5102 generic.go:334] "Generic (PLEG): container finished" podID="63c37b81-081b-4a4c-b448-26e58f97493b" containerID="9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c" exitCode=0
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.062938 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerDied","Data":"9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c"}
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.065140 5102 generic.go:334] "Generic (PLEG): container finished" podID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerID="8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f" exitCode=0
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.065181 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerDied","Data":"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f"}
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.065577 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-lwmjx" event={"ID":"599dad18-1d4a-415a-9fab-c8a5ea7521ed","Type":"ContainerDied","Data":"a57a335cbdc5941333c7dce761b3280e4cd5a67f5c2ae3471b380bfb129527b1"}
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.065601 5102 scope.go:117] "RemoveContainer" containerID="8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f"
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.065743 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-lwmjx" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.092523 5102 scope.go:117] "RemoveContainer" containerID="e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.129187 5102 scope.go:117] "RemoveContainer" containerID="c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.151919 5102 scope.go:117] "RemoveContainer" containerID="8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f" Jan 23 06:58:13 crc kubenswrapper[5102]: E0123 06:58:13.152680 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f\": container with ID starting with 8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f not found: ID does not exist" containerID="8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.152723 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f"} err="failed to get container status \"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f\": rpc error: code = NotFound desc = could not find container \"8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f\": container with ID starting with 8eab8f8ea5dbbe52ff569e17bcb17e29af8197b93a172b11567c5706e322516f not found: ID does not exist" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.152754 5102 scope.go:117] "RemoveContainer" containerID="e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3" Jan 23 06:58:13 crc kubenswrapper[5102]: E0123 06:58:13.153030 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3\": container with ID starting with e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3 not found: ID does not exist" containerID="e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.153065 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3"} err="failed to get container status \"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3\": rpc error: code = NotFound desc = could not find container \"e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3\": container with ID starting with e26d122dc5ac7888fa8fe0ab2aee32a70954ad9051fa495acadf8d2b2c0edef3 not found: ID does not exist" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.153088 5102 scope.go:117] "RemoveContainer" containerID="c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c" Jan 23 06:58:13 crc kubenswrapper[5102]: E0123 06:58:13.154406 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c\": container with ID starting with c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c not found: ID does not exist" containerID="c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c" 
Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.154434 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c"} err="failed to get container status \"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c\": rpc error: code = NotFound desc = could not find container \"c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c\": container with ID starting with c75a1a7a53515d78780d2eb2f6bc592de113a03d87d51d342b4a41c748358a0c not found: ID does not exist" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.218488 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content\") pod \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.218609 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gr9qr\" (UniqueName: \"kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr\") pod \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.218685 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities\") pod \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\" (UID: \"599dad18-1d4a-415a-9fab-c8a5ea7521ed\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.219845 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities" (OuterVolumeSpecName: "utilities") pod "599dad18-1d4a-415a-9fab-c8a5ea7521ed" (UID: "599dad18-1d4a-415a-9fab-c8a5ea7521ed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.226095 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr" (OuterVolumeSpecName: "kube-api-access-gr9qr") pod "599dad18-1d4a-415a-9fab-c8a5ea7521ed" (UID: "599dad18-1d4a-415a-9fab-c8a5ea7521ed"). InnerVolumeSpecName "kube-api-access-gr9qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.278328 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "599dad18-1d4a-415a-9fab-c8a5ea7521ed" (UID: "599dad18-1d4a-415a-9fab-c8a5ea7521ed"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.321089 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.321990 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/599dad18-1d4a-415a-9fab-c8a5ea7521ed-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.322043 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gr9qr\" (UniqueName: \"kubernetes.io/projected/599dad18-1d4a-415a-9fab-c8a5ea7521ed-kube-api-access-gr9qr\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.403238 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.409195 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-lwmjx"] Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.562058 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-86qsn" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.607307 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" path="/var/lib/kubelet/pods/599dad18-1d4a-415a-9fab-c8a5ea7521ed/volumes" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.628867 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content\") pod \"63c37b81-081b-4a4c-b448-26e58f97493b\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.629068 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ggqvw\" (UniqueName: \"kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw\") pod \"63c37b81-081b-4a4c-b448-26e58f97493b\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.629253 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities\") pod \"63c37b81-081b-4a4c-b448-26e58f97493b\" (UID: \"63c37b81-081b-4a4c-b448-26e58f97493b\") " Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.630104 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities" (OuterVolumeSpecName: "utilities") pod "63c37b81-081b-4a4c-b448-26e58f97493b" (UID: "63c37b81-081b-4a4c-b448-26e58f97493b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.633868 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw" (OuterVolumeSpecName: "kube-api-access-ggqvw") pod "63c37b81-081b-4a4c-b448-26e58f97493b" (UID: "63c37b81-081b-4a4c-b448-26e58f97493b"). 
InnerVolumeSpecName "kube-api-access-ggqvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.703560 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "63c37b81-081b-4a4c-b448-26e58f97493b" (UID: "63c37b81-081b-4a4c-b448-26e58f97493b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.734734 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.734782 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ggqvw\" (UniqueName: \"kubernetes.io/projected/63c37b81-081b-4a4c-b448-26e58f97493b-kube-api-access-ggqvw\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:13 crc kubenswrapper[5102]: I0123 06:58:13.734796 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/63c37b81-081b-4a4c-b448-26e58f97493b-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.075702 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-86qsn" event={"ID":"63c37b81-081b-4a4c-b448-26e58f97493b","Type":"ContainerDied","Data":"4a2a06eb7625e8bc1918859d0053c34cd02d80550d61f55b5db559ad46516d3d"} Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.075778 5102 scope.go:117] "RemoveContainer" containerID="9cdff3e90878c1b73b58427afeb0ebede4eec168517d8931f9840f9e0cd63c0c" Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.075730 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-86qsn"
Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.098646 5102 scope.go:117] "RemoveContainer" containerID="729d97cae4af50e12746d889151cb1241fc3f64240c4d45f11c648ee0684ac8a"
Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.109659 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-86qsn"]
Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.114963 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-86qsn"]
Jan 23 06:58:14 crc kubenswrapper[5102]: I0123 06:58:14.132147 5102 scope.go:117] "RemoveContainer" containerID="27bab8a3525adbe865aeaf6d5200e21a90d56e7c7bd39b3f0407dd32cfff9c71"
Jan 23 06:58:15 crc kubenswrapper[5102]: I0123 06:58:15.608338 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" path="/var/lib/kubelet/pods/63c37b81-081b-4a4c-b448-26e58f97493b/volumes"
Jan 23 06:58:16 crc kubenswrapper[5102]: I0123 06:58:16.351521 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-cwvr2"]
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.080223 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sl6q6"
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.127038 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sl6q6"
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.554985 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sl6q6"]
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.568501 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-cxz8x"]
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.568957 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-cxz8x" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="registry-server" containerID="cri-o://08e689498065ace79853c92cc7afbf093dbc4defdc07b884075403b75ccdaf54" gracePeriod=30
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.574098 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bwhrv"]
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.574383 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" containerID="cri-o://4bd43375d5210559dbf648dc48059b34e1c725c4eb5226d8c2ca8573a840c338" gracePeriod=30
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.589165 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7h5pc"]
Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.589499 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7h5pc" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" containerID="cri-o://f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" gracePeriod=30
Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.596088 5102 log.go:32] "ExecSync cmd from runtime service failed" 
err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.598723 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.598735 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wkhpj"] Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599347 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="extract-utilities" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599380 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="extract-utilities" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599396 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599405 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599416 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="extract-utilities" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599427 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="extract-utilities" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599448 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="extract-content" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599455 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="extract-content" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599467 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="extract-content" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599475 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="extract-content" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.599483 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599491 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599646 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="599dad18-1d4a-415a-9fab-c8a5ea7521ed" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.599661 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="63c37b81-081b-4a4c-b448-26e58f97493b" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.600415 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.600568 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" cmd=["grpc_health_probe","-addr=:50051"] Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.600668 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openshift-marketplace/redhat-marketplace-7h5pc" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.608446 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.608568 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.608693 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5l56l\" (UniqueName: \"kubernetes.io/projected/6273c4aa-9895-47ea-a3d6-9ac16123a30f-kube-api-access-5l56l\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.614601 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b5948"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.615052 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b5948" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="registry-server" containerID="cri-o://f6bf7c24330303cd409517ae9e3176fc3be3ed28aa52413612bf288c1abec462" gracePeriod=30 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.617815 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-8hw9t"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.618176 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-8hw9t" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" containerID="cri-o://351c3231f89bda4286b4781d1b0a9ab78ec735edb17cce7b2b859732346b13ee" gracePeriod=30 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 
06:58:18.623090 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wkhpj"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.631387 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nwnmg"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.631743 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nwnmg" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="registry-server" containerID="cri-o://643df5be6247c72b3d54aa4e4797bde5b4c2ef47beb92635169f61b0c0293eeb" gracePeriod=30 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.713166 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.713964 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.714032 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5l56l\" (UniqueName: \"kubernetes.io/projected/6273c4aa-9895-47ea-a3d6-9ac16123a30f-kube-api-access-5l56l\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.716164 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.737797 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/6273c4aa-9895-47ea-a3d6-9ac16123a30f-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.760304 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5l56l\" (UniqueName: \"kubernetes.io/projected/6273c4aa-9895-47ea-a3d6-9ac16123a30f-kube-api-access-5l56l\") pod \"marketplace-operator-79b997595-wkhpj\" (UID: \"6273c4aa-9895-47ea-a3d6-9ac16123a30f\") " pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.826885 5102 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828108 5102 kubelet.go:2431] "SyncLoop REMOVE" 
source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828343 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828657 5102 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828692 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a" gracePeriod=15 Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828793 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828806 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828816 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828822 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828833 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828839 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828849 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828856 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828865 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828871 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828879 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828886 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: E0123 06:58:18.828896 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.828901 5102 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829011 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829031 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829042 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829052 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829061 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829070 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829264 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e" gracePeriod=15 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829472 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0" gracePeriod=15 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829519 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413" gracePeriod=15 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.829588 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e" gracePeriod=15 Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.834510 5102 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="f4b27818a5e8e43d0dc095d08835c792" podUID="71bb4a3aecc4ba5b26c4b7318770ce13" Jan 23 06:58:18 crc kubenswrapper[5102]: I0123 06:58:18.932345 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.017861 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.017923 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.017950 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.017975 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.017995 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.018255 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.018430 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.018455 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: E0123 06:58:19.101107 5102 kubelet.go:1929] "Failed creating a mirror pod for" err="Post 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.195:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.119783 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.119907 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.119976 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.120018 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.120017 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.120059 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.120986 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.121101 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.123819 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: 
\"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.123958 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.123977 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.124016 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.124117 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.124152 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.124188 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.124206 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.129016 5102 generic.go:334] "Generic (PLEG): container finished" podID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerID="643df5be6247c72b3d54aa4e4797bde5b4c2ef47beb92635169f61b0c0293eeb" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.129082 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerDied","Data":"643df5be6247c72b3d54aa4e4797bde5b4c2ef47beb92635169f61b0c0293eeb"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.138187 5102 generic.go:334] "Generic (PLEG): container finished" 
podID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerID="f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.138285 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerDied","Data":"f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.140785 5102 generic.go:334] "Generic (PLEG): container finished" podID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerID="4bd43375d5210559dbf648dc48059b34e1c725c4eb5226d8c2ca8573a840c338" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.140853 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" event={"ID":"cda46479-cb25-47ec-8de7-31c9d6e22960","Type":"ContainerDied","Data":"4bd43375d5210559dbf648dc48059b34e1c725c4eb5226d8c2ca8573a840c338"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.148381 5102 generic.go:334] "Generic (PLEG): container finished" podID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerID="351c3231f89bda4286b4781d1b0a9ab78ec735edb17cce7b2b859732346b13ee" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.148477 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerDied","Data":"351c3231f89bda4286b4781d1b0a9ab78ec735edb17cce7b2b859732346b13ee"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.153573 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.154834 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.155349 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.155368 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.155376 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.155385 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0" exitCode=2 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.155452 5102 scope.go:117] "RemoveContainer" containerID="2008307c64e91647f31a421e72f7bac65c2cfd67ba5009347c92706407759f40" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.157630 5102 generic.go:334] "Generic (PLEG): container finished" podID="0b589483-946f-4931-8bae-7b38e37682b3" containerID="f6bf7c24330303cd409517ae9e3176fc3be3ed28aa52413612bf288c1abec462" exitCode=0 Jan 23 
06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.157694 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerDied","Data":"f6bf7c24330303cd409517ae9e3176fc3be3ed28aa52413612bf288c1abec462"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.159870 5102 generic.go:334] "Generic (PLEG): container finished" podID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerID="08e689498065ace79853c92cc7afbf093dbc4defdc07b884075403b75ccdaf54" exitCode=0 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.160170 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sl6q6" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="registry-server" containerID="cri-o://68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf" gracePeriod=30 Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.161076 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerDied","Data":"08e689498065ace79853c92cc7afbf093dbc4defdc07b884075403b75ccdaf54"} Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.161587 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: E0123 06:58:19.162853 5102 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.195:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-sl6q6.188d49ea454a6509 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-sl6q6,UID:2e9105da-eb2c-4ead-96d7-2ca6c190ef0d,APIVersion:v1,ResourceVersion:28484,FieldPath:spec.containers{registry-server},},Reason:Killing,Message:Stopping container registry-server,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 06:58:19.160151305 +0000 UTC m=+249.980500280,LastTimestamp:2026-01-23 06:58:19.160151305 +0000 UTC m=+249.980500280,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.313451 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.314456 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.314934 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.327370 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca\") pod \"cda46479-cb25-47ec-8de7-31c9d6e22960\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.327489 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vqzmw\" (UniqueName: \"kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw\") pod \"cda46479-cb25-47ec-8de7-31c9d6e22960\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.327577 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics\") pod \"cda46479-cb25-47ec-8de7-31c9d6e22960\" (UID: \"cda46479-cb25-47ec-8de7-31c9d6e22960\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.329052 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "cda46479-cb25-47ec-8de7-31c9d6e22960" (UID: "cda46479-cb25-47ec-8de7-31c9d6e22960"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.350987 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw" (OuterVolumeSpecName: "kube-api-access-vqzmw") pod "cda46479-cb25-47ec-8de7-31c9d6e22960" (UID: "cda46479-cb25-47ec-8de7-31c9d6e22960"). InnerVolumeSpecName "kube-api-access-vqzmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.354787 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "cda46479-cb25-47ec-8de7-31c9d6e22960" (UID: "cda46479-cb25-47ec-8de7-31c9d6e22960"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.408212 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.430774 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vqzmw\" (UniqueName: \"kubernetes.io/projected/cda46479-cb25-47ec-8de7-31c9d6e22960-kube-api-access-vqzmw\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.430846 5102 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.430864 5102 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/cda46479-cb25-47ec-8de7-31c9d6e22960-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.576476 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-8hw9t"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.578400 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.578949 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.579283 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.585197 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nwnmg"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.585754 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.586273 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.586599 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.586850 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.589092 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-cxz8x"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.590009 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.590358 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.590723 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.591146 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.591433 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.630981 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.633934 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.634566 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.635037 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.635834 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.678753 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5948"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.679520 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.679848 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.680162 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.680767 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.681389 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.681657 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.733808 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities\") pod \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.733884 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-catalog-content\") pod \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.733915 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities\") pod \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.733962 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content\") pod \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.734004 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xrf7z\" (UniqueName: \"kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z\") pod \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\" (UID: \"a67b60a8-bbb4-471d-a0d5-da47ec4819d2\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.734028 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content\") pod \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.734057 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") pod \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\" (UID: \"1b442d58-fe80-472a-a33e-ec4e15eadd8c\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.734073 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities\") pod \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.734096 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2rzfz\" (UniqueName: \"kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz\") pod \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\" (UID: \"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b\") "
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.736377 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities" (OuterVolumeSpecName: "utilities") pod "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" (UID: "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.736465 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities" (OuterVolumeSpecName: "utilities") pod "1b442d58-fe80-472a-a33e-ec4e15eadd8c" (UID: "1b442d58-fe80-472a-a33e-ec4e15eadd8c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.738517 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities" (OuterVolumeSpecName: "utilities") pod "a67b60a8-bbb4-471d-a0d5-da47ec4819d2" (UID: "a67b60a8-bbb4-471d-a0d5-da47ec4819d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.746349 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x" (OuterVolumeSpecName: "kube-api-access-g8r4x") pod "1b442d58-fe80-472a-a33e-ec4e15eadd8c" (UID: "1b442d58-fe80-472a-a33e-ec4e15eadd8c"). InnerVolumeSpecName "kube-api-access-g8r4x". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.746498 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z" (OuterVolumeSpecName: "kube-api-access-xrf7z") pod "a67b60a8-bbb4-471d-a0d5-da47ec4819d2" (UID: "a67b60a8-bbb4-471d-a0d5-da47ec4819d2"). InnerVolumeSpecName "kube-api-access-xrf7z". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.746926 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz" (OuterVolumeSpecName: "kube-api-access-2rzfz") pod "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" (UID: "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"). InnerVolumeSpecName "kube-api-access-2rzfz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.809252 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a67b60a8-bbb4-471d-a0d5-da47ec4819d2" (UID: "a67b60a8-bbb4-471d-a0d5-da47ec4819d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.835953 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content\") pod \"0b589483-946f-4931-8bae-7b38e37682b3\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836263 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jxphv\" (UniqueName: \"kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv\") pod \"0b589483-946f-4931-8bae-7b38e37682b3\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836290 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities\") pod \"0b589483-946f-4931-8bae-7b38e37682b3\" (UID: \"0b589483-946f-4931-8bae-7b38e37682b3\") " Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836622 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836647 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xrf7z\" (UniqueName: \"kubernetes.io/projected/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-kube-api-access-xrf7z\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836662 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-g8r4x\" (UniqueName: \"kubernetes.io/projected/1b442d58-fe80-472a-a33e-ec4e15eadd8c-kube-api-access-g8r4x\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836672 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836682 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2rzfz\" (UniqueName: \"kubernetes.io/projected/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-kube-api-access-2rzfz\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836691 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.836700 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a67b60a8-bbb4-471d-a0d5-da47ec4819d2-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.837451 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities" (OuterVolumeSpecName: "utilities") pod "0b589483-946f-4931-8bae-7b38e37682b3" (UID: "0b589483-946f-4931-8bae-7b38e37682b3"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.841907 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv" (OuterVolumeSpecName: "kube-api-access-jxphv") pod "0b589483-946f-4931-8bae-7b38e37682b3" (UID: "0b589483-946f-4931-8bae-7b38e37682b3"). InnerVolumeSpecName "kube-api-access-jxphv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.858827 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0b589483-946f-4931-8bae-7b38e37682b3" (UID: "0b589483-946f-4931-8bae-7b38e37682b3"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.893663 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" (UID: "45d96a0d-f31b-4afb-a528-58d7bbe8fe1b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.896851 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7h5pc" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.898256 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.898949 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.899525 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.899841 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.900169 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: 
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.900445 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.900756 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.905419 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1b442d58-fe80-472a-a33e-ec4e15eadd8c" (UID: "1b442d58-fe80-472a-a33e-ec4e15eadd8c"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.918829 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sl6q6"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.920222 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.921015 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.921323 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.921820 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.922337 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.923852 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.924520 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.938698 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.938751 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.938769 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1b442d58-fe80-472a-a33e-ec4e15eadd8c-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.938789 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jxphv\" (UniqueName: \"kubernetes.io/projected/0b589483-946f-4931-8bae-7b38e37682b3-kube-api-access-jxphv\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:19 crc kubenswrapper[5102]: I0123 06:58:19.938809 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0b589483-946f-4931-8bae-7b38e37682b3-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.039875 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4qlgt\" (UniqueName: \"kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt\") pod \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.039970 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content\") pod \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.040014 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities\") pod \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.040083 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities\") pod \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.040115 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content\") pod \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\" (UID: \"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.040174 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h4gx5\" (UniqueName: \"kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5\") pod \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\" (UID: \"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae\") "
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.041731 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities" (OuterVolumeSpecName: "utilities") pod "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" (UID: "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.042306 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities" (OuterVolumeSpecName: "utilities") pod "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" (UID: "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.044168 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5" (OuterVolumeSpecName: "kube-api-access-h4gx5") pod "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" (UID: "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae"). InnerVolumeSpecName "kube-api-access-h4gx5". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.044503 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt" (OuterVolumeSpecName: "kube-api-access-4qlgt") pod "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" (UID: "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d"). InnerVolumeSpecName "kube-api-access-4qlgt". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.064509 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" (UID: "f7bf48e9-2a76-4bc6-ab13-0fe41c736aae"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.101249 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" (UID: "2e9105da-eb2c-4ead-96d7-2ca6c190ef0d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142128 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h4gx5\" (UniqueName: \"kubernetes.io/projected/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-kube-api-access-h4gx5\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142192 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qlgt\" (UniqueName: \"kubernetes.io/projected/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-kube-api-access-4qlgt\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142207 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142221 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142238 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.142252 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.188676 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b5948" event={"ID":"0b589483-946f-4931-8bae-7b38e37682b3","Type":"ContainerDied","Data":"c0c506e4454b3c1f305b8d5d60c8c78afd7552117af76ac7f3ce7d3cce334e2e"}
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.189268 5102 scope.go:117] "RemoveContainer" containerID="f6bf7c24330303cd409517ae9e3176fc3be3ed28aa52413612bf288c1abec462"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.188752 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b5948"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.190245 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.190616 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.191229 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.191463 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.191817 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.192135 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.192423 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.193006 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7h5pc" event={"ID":"f7bf48e9-2a76-4bc6-ab13-0fe41c736aae","Type":"ContainerDied","Data":"03e3d341abe59abaa9e2e8609578a6fd86e1aad3322af11a8e64c2b807eed26a"}
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.193031 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7h5pc"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.193560 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.193768 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.194078 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.194300 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.194585 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.194871 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.194998 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" event={"ID":"cda46479-cb25-47ec-8de7-31c9d6e22960","Type":"ContainerDied","Data":"c1aa8a5566dd8cbd318ebe5837aa3db1bb7c802c692d0c896ee2ca59ea863ad1"}
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.195028 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.195879 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.196503 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.196704 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.196973 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.197221 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.197410 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.197688 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.197887 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.200233 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.200425 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.200685 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.200865 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.201040 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.201230 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.201426 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.205506 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.205744 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.205935 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.206126 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.206333 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.206513 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.206713 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.208468 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-8hw9t" event={"ID":"1b442d58-fe80-472a-a33e-ec4e15eadd8c","Type":"ContainerDied","Data":"d14e85fe10b701f0d52a195c9f592f7c5b660601491209d55bfd7443333e0f91"} Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.208632 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-8hw9t" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.211778 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.215265 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.215507 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.215836 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.216329 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.216676 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.221257 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.221274 5102 scope.go:117] "RemoveContainer" containerID="84c2eea1725ca19fdc240f5aab958c4410dba652b4566603f75e4e9e40697d0a" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.222061 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.222249 5102 
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.222519 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.222868 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.223032 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.223189 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.223804 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.227029 5102 generic.go:334] "Generic (PLEG): container finished" podID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerID="68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf" exitCode=0
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.227107 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sl6q6"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.227109 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerDied","Data":"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf"}
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.227281 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sl6q6" event={"ID":"2e9105da-eb2c-4ead-96d7-2ca6c190ef0d","Type":"ContainerDied","Data":"68ea154e577a2e7993f2a594af997e1dfd8df90c5eddb469ad8863b8bd7c6055"}
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.227956 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.228276 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.228614 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.228841 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.229046 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.229502 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.229785 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:20 crc kubenswrapper[5102]:
I0123 06:58:20.230053 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.230297 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.230479 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.230603 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.230735 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.230989 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.231258 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.231471 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.234063 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-cxz8x" event={"ID":"a67b60a8-bbb4-471d-a0d5-da47ec4819d2","Type":"ContainerDied","Data":"398c7f1cb2372b6868f22e6e0692d81d830c9c91f449fa0fe6dc038c72ff8458"} Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.234202 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-cxz8x" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.236160 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.236382 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.236631 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.236869 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.237083 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.237270 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.237448 5102 generic.go:334] "Generic (PLEG): container finished" podID="c50c373e-478c-4132-9fe4-883f61e7e308" containerID="5194b76da2edc01c2a826f50a8879b06791472b21982a285a55059f05363d2c7" exitCode=0 Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.237497 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c50c373e-478c-4132-9fe4-883f61e7e308","Type":"ContainerDied","Data":"5194b76da2edc01c2a826f50a8879b06791472b21982a285a55059f05363d2c7"} Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.237458 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.238139 5102 status_manager.go:851] "Failed to get status 
for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.238605 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.239045 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.239278 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.239481 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.239726 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.239924 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240011 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nwnmg" event={"ID":"45d96a0d-f31b-4afb-a528-58d7bbe8fe1b","Type":"ContainerDied","Data":"947c50eacd7a7ab68ee01442471eb06cc525ac3be54ec2c01a6601be96898321"} Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240026 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nwnmg" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240163 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240435 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240656 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.240944 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241162 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241439 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241742 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241890 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"a18e019c4fc8e7ba391f1d1048ad1d0684b9d6acc91954457f54a8200f09685b"} Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241927 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"18b49c421010fac2d068c7a62163636cf9c69f0217a468c8917269aa67b22a0c"} 
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.241946 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.242857 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.242898 5102 kubelet.go:1929] "Failed creating a mirror pod for" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods\": dial tcp 38.102.83.195:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.243122 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.243411 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.244885 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.245117 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.251804 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.252275 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc 
kubenswrapper[5102]: I0123 06:58:20.252658 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.252929 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.253334 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.254750 5102 scope.go:117] "RemoveContainer" containerID="227c901cda76bb24eebfaf4aaaef7c3fc1700d174d5cfc0f9c3a0d31f61bddfa" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.258341 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.278464 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.289885 5102 scope.go:117] "RemoveContainer" containerID="f2d4fb7c0d3814e392dbc8ed9cacb935293418b7ead8a6f32de4fbfa9f449c3f" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.298130 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.312369 5102 scope.go:117] "RemoveContainer" containerID="8b8cd6b44af39aea24a42e6a8e7de7f8da063c03bbff3114a89bd075a14ab0aa" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.318966 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.322527 5102 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 23 06:58:20 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1" Netns:"/var/run/netns/bb167c30-1bbf-468c-87ee-47775fd8bd0c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:20 crc kubenswrapper[5102]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:20 crc kubenswrapper[5102]: > Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.322619 5102 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 23 06:58:20 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1" Netns:"/var/run/netns/bb167c30-1bbf-468c-87ee-47775fd8bd0c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get 
"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:20 crc kubenswrapper[5102]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:20 crc kubenswrapper[5102]: > pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.322643 5102 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Jan 23 06:58:20 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1" Netns:"/var/run/netns/bb167c30-1bbf-468c-87ee-47775fd8bd0c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:20 crc kubenswrapper[5102]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:20 crc kubenswrapper[5102]: > pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.322720 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox 
k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1\\\" Netns:\\\"/var/run/netns/bb167c30-1bbf-468c-87ee-47775fd8bd0c\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=707a45205b87ec55326d270952d2b0195f51dffb0207f11d46e3bb9de215e4d1;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s\\\": dial tcp 38.102.83.195:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.334243 5102 scope.go:117] "RemoveContainer" containerID="4739ed672c6b24d9cd7d1d7d88ca98a171e3b5197eb8ace6f558f23e94774a40" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.339428 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.352695 5102 scope.go:117] "RemoveContainer" containerID="4bd43375d5210559dbf648dc48059b34e1c725c4eb5226d8c2ca8573a840c338" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.358970 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.369020 5102 scope.go:117] "RemoveContainer" containerID="351c3231f89bda4286b4781d1b0a9ab78ec735edb17cce7b2b859732346b13ee" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.379320 
5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.385932 5102 scope.go:117] "RemoveContainer" containerID="24ac4692236247f5e39baaba7e80017233e482ac79c99c3b223c7b05cbedf074" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.398801 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.411107 5102 scope.go:117] "RemoveContainer" containerID="9a6273dc9aa78886f03e9356b92e462a9a30a6150ecd7acfc56209c5988e2353" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.418495 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.429348 5102 scope.go:117] "RemoveContainer" containerID="68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.439152 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.445167 5102 scope.go:117] "RemoveContainer" containerID="0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.458963 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.467933 5102 scope.go:117] "RemoveContainer" containerID="54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.478489 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.487100 5102 scope.go:117] "RemoveContainer" containerID="68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf" Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.488114 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = 
NotFound desc = could not find container \"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf\": container with ID starting with 68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf not found: ID does not exist" containerID="68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.488200 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf"} err="failed to get container status \"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf\": rpc error: code = NotFound desc = could not find container \"68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf\": container with ID starting with 68246257e7f95a858a07784796b117f98d3d1bb8de1c62a8e3dd7f770e3e4edf not found: ID does not exist"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.488251 5102 scope.go:117] "RemoveContainer" containerID="0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15"
Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.488864 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15\": container with ID starting with 0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15 not found: ID does not exist" containerID="0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.488914 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15"} err="failed to get container status \"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15\": rpc error: code = NotFound desc = could not find container \"0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15\": container with ID starting with 0926ce56d266ff0f4d43f0d8d236d994cc5ebe87ca9f6cdadd978269f3828f15 not found: ID does not exist"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.488952 5102 scope.go:117] "RemoveContainer" containerID="54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61"
Jan 23 06:58:20 crc kubenswrapper[5102]: E0123 06:58:20.489519 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61\": container with ID starting with 54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61 not found: ID does not exist" containerID="54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.489633 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61"} err="failed to get container status \"54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61\": rpc error: code = NotFound desc = could not find container \"54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61\": container with ID starting with 54c49528aa52fe859d8763c3977ebd7db60265a6e7f0fcd9b604d1405235da61 not found: ID does not exist"
Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.489674 5102 scope.go:117] "RemoveContainer"
containerID="08e689498065ace79853c92cc7afbf093dbc4defdc07b884075403b75ccdaf54" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.499268 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.510381 5102 scope.go:117] "RemoveContainer" containerID="f1c856083c73905fa667ee03d049531c9b1d7f1afb866b4cdbc6eb9abc8375fb" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.518627 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.529572 5102 scope.go:117] "RemoveContainer" containerID="f8424f91b6b63e7dcd121a227f98ec294243665d697937cef52d38d305f69d80" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.540006 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.550363 5102 scope.go:117] "RemoveContainer" containerID="643df5be6247c72b3d54aa4e4797bde5b4c2ef47beb92635169f61b0c0293eeb" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.565601 5102 scope.go:117] "RemoveContainer" containerID="5ebe91b262b728417cd5ad3066732603c880403bed578c19e0ff904a212dc9fb" Jan 23 06:58:20 crc kubenswrapper[5102]: I0123 06:58:20.585745 5102 scope.go:117] "RemoveContainer" containerID="ab4359e692538395e31eb783670b96649f5d4383f6c90a0424f32d146a1096b3" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.263092 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.264176 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.342655 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.343580 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.344448 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.345260 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.346009 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.346358 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.347084 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.347658 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.347963 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.348207 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.348489 5102 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.372800 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.372865 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.372943 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.373316 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.373357 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.373374 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.475314 5102 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.475854 5102 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.475869 5102 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.607640 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.630134 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.630767 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.631037 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.631309 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.631556 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.631813 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.632052 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.632320 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.632603 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677406 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access\") pod \"c50c373e-478c-4132-9fe4-883f61e7e308\" 
(UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677525 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock\") pod \"c50c373e-478c-4132-9fe4-883f61e7e308\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677608 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir\") pod \"c50c373e-478c-4132-9fe4-883f61e7e308\" (UID: \"c50c373e-478c-4132-9fe4-883f61e7e308\") " Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677664 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock" (OuterVolumeSpecName: "var-lock") pod "c50c373e-478c-4132-9fe4-883f61e7e308" (UID: "c50c373e-478c-4132-9fe4-883f61e7e308"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677776 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "c50c373e-478c-4132-9fe4-883f61e7e308" (UID: "c50c373e-478c-4132-9fe4-883f61e7e308"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.677992 5102 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-kubelet-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.678027 5102 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/c50c373e-478c-4132-9fe4-883f61e7e308-var-lock\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.685851 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "c50c373e-478c-4132-9fe4-883f61e7e308" (UID: "c50c373e-478c-4132-9fe4-883f61e7e308"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:21 crc kubenswrapper[5102]: I0123 06:58:21.779236 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/c50c373e-478c-4132-9fe4-883f61e7e308-kube-api-access\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.101682 5102 log.go:32] "RunPodSandbox from runtime service failed" err=< Jan 23 06:58:22 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b" Netns:"/var/run/netns/8166b90e-1f22-428b-8b85-65256b6d492c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:22 crc kubenswrapper[5102]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:22 crc kubenswrapper[5102]: > Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.101804 5102 kuberuntime_sandbox.go:72] "Failed to create sandbox for pod" err=< Jan 23 06:58:22 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b" Netns:"/var/run/netns/8166b90e-1f22-428b-8b85-65256b6d492c" IfName:"eth0" 
Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:22 crc kubenswrapper[5102]: ': StdinData: {"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:22 crc kubenswrapper[5102]: > pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.101977 5102 kuberuntime_manager.go:1170] "CreatePodSandbox for pod failed" err=< Jan 23 06:58:22 crc kubenswrapper[5102]: rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network "multus-cni-network": plugin type="multus-shim" name="multus-cni-network" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:"7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b" Netns:"/var/run/netns/8166b90e-1f22-428b-8b85-65256b6d492c" IfName:"eth0" Args:"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f" Path:"" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get "https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s": dial tcp 38.102.83.195:6443: connect: connection refused Jan 23 06:58:22 crc kubenswrapper[5102]: ': StdinData: 
{"binDir":"/var/lib/cni/bin","clusterNetwork":"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf","cniVersion":"0.3.1","daemonSocketDir":"/run/multus/socket","globalNamespaces":"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv","logLevel":"verbose","logToStderr":true,"name":"multus-cni-network","namespaceIsolation":true,"type":"multus-shim"} Jan 23 06:58:22 crc kubenswrapper[5102]: > pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.102068 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"CreatePodSandbox\" for \"marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\" with CreatePodSandboxError: \"Failed to create sandbox for pod \\\"marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\\\": rpc error: code = Unknown desc = failed to create pod network sandbox k8s_marketplace-operator-79b997595-wkhpj_openshift-marketplace_6273c4aa-9895-47ea-a3d6-9ac16123a30f_0(7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b): error adding pod openshift-marketplace_marketplace-operator-79b997595-wkhpj to CNI network \\\"multus-cni-network\\\": plugin type=\\\"multus-shim\\\" name=\\\"multus-cni-network\\\" failed (add): CmdAdd (shim): CNI request failed with status 400: 'ContainerID:\\\"7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b\\\" Netns:\\\"/var/run/netns/8166b90e-1f22-428b-8b85-65256b6d492c\\\" IfName:\\\"eth0\\\" Args:\\\"IgnoreUnknown=1;K8S_POD_NAMESPACE=openshift-marketplace;K8S_POD_NAME=marketplace-operator-79b997595-wkhpj;K8S_POD_INFRA_CONTAINER_ID=7c04e1a09af869c617db4836ca7168ec207d029b172b7cf8c34314af1e4feb7b;K8S_POD_UID=6273c4aa-9895-47ea-a3d6-9ac16123a30f\\\" Path:\\\"\\\" ERRORED: error configuring pod [openshift-marketplace/marketplace-operator-79b997595-wkhpj] networking: Multus: [openshift-marketplace/marketplace-operator-79b997595-wkhpj/6273c4aa-9895-47ea-a3d6-9ac16123a30f]: error setting the networks status: SetPodNetworkStatusAnnotation: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: SetNetworkStatus: failed to update the pod marketplace-operator-79b997595-wkhpj in out of cluster comm: status update failed for pod /: Get \\\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-wkhpj?timeout=1m0s\\\": dial tcp 38.102.83.195:6443: connect: connection refused\\n': StdinData: {\\\"binDir\\\":\\\"/var/lib/cni/bin\\\",\\\"clusterNetwork\\\":\\\"/host/run/multus/cni/net.d/10-ovn-kubernetes.conf\\\",\\\"cniVersion\\\":\\\"0.3.1\\\",\\\"daemonSocketDir\\\":\\\"/run/multus/socket\\\",\\\"globalNamespaces\\\":\\\"default,openshift-multus,openshift-sriov-network-operator,openshift-cnv\\\",\\\"logLevel\\\":\\\"verbose\\\",\\\"logToStderr\\\":true,\\\"name\\\":\\\"multus-cni-network\\\",\\\"namespaceIsolation\\\":true,\\\"type\\\":\\\"multus-shim\\\"}\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.278584 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.279677 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a" exitCode=0 Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.279793 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.279790 5102 scope.go:117] "RemoveContainer" containerID="b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.280407 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.280700 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.281014 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.281576 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.282412 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.282761 5102 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283009 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283206 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283366 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"c50c373e-478c-4132-9fe4-883f61e7e308","Type":"ContainerDied","Data":"e848f0c388bcb167d8e2bac8bc54424002f199ec44181408ecd0ed275e7777bb"} Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283407 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e848f0c388bcb167d8e2bac8bc54424002f199ec44181408ecd0ed275e7777bb" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283438 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.283598 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.286389 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.286677 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.286875 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.287033 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.287261 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.287449 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.287618 5102 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.287903 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.288075 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.298743 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.299093 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.299372 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.299699 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.300033 5102 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.300790 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.300978 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.301820 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.302063 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.312863 5102 scope.go:117] "RemoveContainer" containerID="dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.335374 5102 scope.go:117] "RemoveContainer" containerID="7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.353855 5102 scope.go:117] "RemoveContainer" containerID="0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.371072 5102 scope.go:117] "RemoveContainer" containerID="ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.396946 5102 scope.go:117] "RemoveContainer" containerID="eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.422863 5102 scope.go:117] "RemoveContainer" containerID="b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.423865 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\": container with ID starting with b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e not found: ID does not exist" containerID="b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.423901 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e"} err="failed to get container status \"b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\": rpc error: code = NotFound desc = could not find container \"b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e\": container with ID starting with b345a68cf767caa794a5dace30f1147d1e6c7f9ced6419d7cdea2ce316fa817e not found: ID does not exist" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 
06:58:22.423932 5102 scope.go:117] "RemoveContainer" containerID="dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.424512 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\": container with ID starting with dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413 not found: ID does not exist" containerID="dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.424577 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413"} err="failed to get container status \"dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\": rpc error: code = NotFound desc = could not find container \"dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413\": container with ID starting with dfdc2eafb408f9c5a2b0305686a416bcccb91a6adea16504bc24592147032413 not found: ID does not exist" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.424614 5102 scope.go:117] "RemoveContainer" containerID="7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.425329 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\": container with ID starting with 7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e not found: ID does not exist" containerID="7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.425359 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e"} err="failed to get container status \"7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\": rpc error: code = NotFound desc = could not find container \"7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e\": container with ID starting with 7919752f81e5c2936552bffbe4adaae8b219bc0e948c13507005fefb824e6a1e not found: ID does not exist" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.425377 5102 scope.go:117] "RemoveContainer" containerID="0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0" Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.425801 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\": container with ID starting with 0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0 not found: ID does not exist" containerID="0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0" Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.425836 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0"} err="failed to get container status \"0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\": rpc error: code = NotFound desc = could not find container \"0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0\": container with ID 
starting with 0d133c40f713b55ce45ec757bae4f596bc78b3f8fb1d606c4aec8c53cb2f3af0 not found: ID does not exist"
Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.425861 5102 scope.go:117] "RemoveContainer" containerID="ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a"
Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.426167 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\": container with ID starting with ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a not found: ID does not exist" containerID="ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a"
Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.426197 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a"} err="failed to get container status \"ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\": rpc error: code = NotFound desc = could not find container \"ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a\": container with ID starting with ef43dbf7e9365d0c72c3c91a927e39512bbd84c09d97ce90a685d9b9ac56f30a not found: ID does not exist"
Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.426215 5102 scope.go:117] "RemoveContainer" containerID="eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db"
Jan 23 06:58:22 crc kubenswrapper[5102]: E0123 06:58:22.426522 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\": container with ID starting with eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db not found: ID does not exist" containerID="eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db"
Jan 23 06:58:22 crc kubenswrapper[5102]: I0123 06:58:22.426935 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db"} err="failed to get container status \"eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\": rpc error: code = NotFound desc = could not find container \"eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db\": container with ID starting with eb907a6d24f4830aea934f2e2533e34fc7e9f5bd64e0bf2eb8b2f929350945db not found: ID does not exist"
Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.606041 5102 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.607142 5102 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.607504 5102 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused"
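Each RemoveContainer above is followed by a ContainerStatus call that fails with gRPC code NotFound: the container is already gone, so the deletor logs the error and the cleanup is effectively complete. A sketch of that idempotent-delete check using the standard gRPC status API; isGone is a hypothetical helper for illustration, not kubelet code:

package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// isGone reports whether err is a gRPC NotFound, i.e. the runtime no longer
// knows the container, so a deletion attempt can be treated as already done.
func isGone(err error) bool {
	s, ok := status.FromError(err)
	return ok && s.Code() == codes.NotFound
}

func main() {
	// Shape of the errors seen in the log above.
	err := status.Error(codes.NotFound, "could not find container")
	fmt.Println("treat delete as success:", isGone(err))
}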
"Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.608192 5102 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:24 crc kubenswrapper[5102]: I0123 06:58:24.608231 5102 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.608523 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="200ms" Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.810103 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="400ms" Jan 23 06:58:24 crc kubenswrapper[5102]: E0123 06:58:24.922601 5102 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.102.83.195:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-sl6q6.188d49ea454a6509 openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-sl6q6,UID:2e9105da-eb2c-4ead-96d7-2ca6c190ef0d,APIVersion:v1,ResourceVersion:28484,FieldPath:spec.containers{registry-server},},Reason:Killing,Message:Stopping container registry-server,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2026-01-23 06:58:19.160151305 +0000 UTC m=+249.980500280,LastTimestamp:2026-01-23 06:58:19.160151305 +0000 UTC m=+249.980500280,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Jan 23 06:58:25 crc kubenswrapper[5102]: E0123 06:58:25.211519 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="800ms" Jan 23 06:58:26 crc kubenswrapper[5102]: E0123 06:58:26.012911 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="1.6s" Jan 23 06:58:26 crc kubenswrapper[5102]: E0123 06:58:26.656669 5102 desired_state_of_world_populator.go:312] "Error processing volume" err="error processing PVC openshift-image-registry/crc-image-registry-storage: failed to fetch PVC from API server: Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-image-registry/persistentvolumeclaims/crc-image-registry-storage\": dial tcp 38.102.83.195:6443: connect: connection refused" 
pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" volumeName="registry-storage" Jan 23 06:58:27 crc kubenswrapper[5102]: E0123 06:58:27.614702 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="3.2s" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.601809 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.602671 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.603652 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.604377 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.604904 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.605427 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.606035 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:29 crc kubenswrapper[5102]: I0123 06:58:29.606420 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 
38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:30 crc kubenswrapper[5102]: E0123 06:58:30.815679 5102 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.102.83.195:6443: connect: connection refused" interval="6.4s"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.597478 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.598735 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.599395 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.600004 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.600362 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.600878 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.601214 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused"
Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.601508 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused"
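The ensure-lease retries above back off exponentially: the logged interval doubles from 200ms through 400ms, 800ms, 1.6s and 3.2s up to 6.4s. A minimal Go reproduction of that cadence; the 7s cap is an assumption chosen only to stop the loop after 6.4s, not a value taken from the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	interval := 200 * time.Millisecond // first retry interval seen in the log
	const maxDelay = 7 * time.Second   // assumed cap for illustration
	for interval <= maxDelay {
		fmt.Printf("will retry in %v\n", interval)
		interval *= 2 // double after each failed attempt, matching the log
	}
}

Run as-is this prints the exact sequence logged above: 200ms, 400ms, 800ms, 1.6s, 3.2s, 6.4s.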
pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.617246 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.617305 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:31 crc kubenswrapper[5102]: E0123 06:58:31.618068 5102 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:31 crc kubenswrapper[5102]: I0123 06:58:31.618835 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.355832 5102 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="ee8ad2af779bdeb363dc8ed28ca82cec53637d8d5be0b365d8804e97d84b5b75" exitCode=0 Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.356304 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"ee8ad2af779bdeb363dc8ed28ca82cec53637d8d5be0b365d8804e97d84b5b75"} Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.356346 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"dc47bf0b0fd42b9cbecf5113142304bb69a64b1e3be8146c7b9c482d9a7c7087"} Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.356720 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.356736 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.357505 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: E0123 06:58:32.357574 5102 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.357727 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.358170 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.359336 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.360221 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.360479 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.360845 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.361291 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.361668 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.361731 5102 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82" exitCode=1 Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.361767 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82"} Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.362388 5102 scope.go:117] "RemoveContainer" containerID="477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82" Jan 
23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.362885 5102 status_manager.go:851] "Failed to get status for pod" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.363355 5102 status_manager.go:851] "Failed to get status for pod" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" pod="openshift-marketplace/redhat-marketplace-7h5pc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-7h5pc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.363857 5102 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.364332 5102 status_manager.go:851] "Failed to get status for pod" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" pod="openshift-marketplace/certified-operators-sl6q6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-sl6q6\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.364900 5102 status_manager.go:851] "Failed to get status for pod" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" pod="openshift-marketplace/marketplace-operator-79b997595-bwhrv" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/marketplace-operator-79b997595-bwhrv\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.365394 5102 status_manager.go:851] "Failed to get status for pod" podUID="0b589483-946f-4931-8bae-7b38e37682b3" pod="openshift-marketplace/redhat-marketplace-b5948" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-marketplace-b5948\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.365936 5102 status_manager.go:851] "Failed to get status for pod" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" pod="openshift-marketplace/community-operators-cxz8x" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/community-operators-cxz8x\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.366395 5102 status_manager.go:851] "Failed to get status for pod" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" pod="openshift-marketplace/redhat-operators-nwnmg" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-nwnmg\": dial tcp 38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.367962 5102 status_manager.go:851] "Failed to get status for pod" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" pod="openshift-marketplace/redhat-operators-8hw9t" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/redhat-operators-8hw9t\": dial tcp 
38.102.83.195:6443: connect: connection refused" Jan 23 06:58:32 crc kubenswrapper[5102]: I0123 06:58:32.601633 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.386505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"71b8a0f0de165e30964f3c8ca0fd1b5012fdb52d7d4e5b0de7ed8169804ff3cf"} Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.387130 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5a006baf43c5333726d873dd4c42f0525f13bb57a5b3418025bdb7338ef30330"} Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.387147 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"1e390dd148b9fe3fec55e4ae89d35ea29561ae6136c1ca1f5c1cbd147eed642a"} Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.387162 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"4e66e3a470b6daa840d90931fcf3d2895e66b3bd3ea7a069b27f3a72caa0149a"} Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.390017 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Jan 23 06:58:33 crc kubenswrapper[5102]: I0123 06:58:33.390098 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"90ce02475a921ac1cfa9d700c17285a4369c70616bc0b05519444b4064d7c3a6"} Jan 23 06:58:34 crc kubenswrapper[5102]: I0123 06:58:34.401028 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"21e3f33b9a9befe3d6a7d731304c1e03983cf4c723df88cbd0a7cc41eb385be2"} Jan 23 06:58:34 crc kubenswrapper[5102]: I0123 06:58:34.401671 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:34 crc kubenswrapper[5102]: I0123 06:58:34.401717 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:35 crc kubenswrapper[5102]: I0123 06:58:35.666998 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:58:35 crc kubenswrapper[5102]: I0123 06:58:35.667294 5102 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 06:58:35 crc kubenswrapper[5102]: I0123 06:58:35.667663 5102 prober.go:107] "Probe failed" probeType="Startup" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 23 06:58:36 crc kubenswrapper[5102]: I0123 06:58:36.145982 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:58:36 crc kubenswrapper[5102]: I0123 06:58:36.619567 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:36 crc kubenswrapper[5102]: I0123 06:58:36.620350 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:36 crc kubenswrapper[5102]: I0123 06:58:36.627654 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:37 crc kubenswrapper[5102]: I0123 06:58:37.598306 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:37 crc kubenswrapper[5102]: I0123 06:58:37.599987 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.435101 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerStarted","Data":"29e0249c7c0c051313cbe44743f17b3c90d5bbfc2b33da35a85c08ab5fc94c4a"} Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.436091 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerStarted","Data":"683cc1682b270f6f4b1c54da0ba9fed61a1d8a39fc0d9fe28e4ec58ea604c491"} Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.436358 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.438910 5102 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wkhpj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" start-of-body= Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.439006 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.933433 5102 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wkhpj container/marketplace-operator namespace/openshift-marketplace: Liveness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" start-of-body= Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.934054 5102 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.933443 5102 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-wkhpj container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" start-of-body= Jan 23 06:58:38 crc kubenswrapper[5102]: I0123 06:58:38.934266 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.62:8080/healthz\": dial tcp 10.217.0.62:8080: connect: connection refused" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.410399 5102 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.444952 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/0.log" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445012 5102 generic.go:334] "Generic (PLEG): container finished" podID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerID="29e0249c7c0c051313cbe44743f17b3c90d5bbfc2b33da35a85c08ab5fc94c4a" exitCode=1 Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445098 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerDied","Data":"29e0249c7c0c051313cbe44743f17b3c90d5bbfc2b33da35a85c08ab5fc94c4a"} Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445517 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445594 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445613 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.445914 5102 scope.go:117] "RemoveContainer" containerID="29e0249c7c0c051313cbe44743f17b3c90d5bbfc2b33da35a85c08ab5fc94c4a" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.451447 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:39 crc kubenswrapper[5102]: I0123 06:58:39.617974 5102 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6f4f2bfd-ad46-4b06-b053-c88bdcc65f53" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.454209 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/1.log" Jan 
23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.455268 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/0.log" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.455350 5102 generic.go:334] "Generic (PLEG): container finished" podID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" exitCode=1 Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.455413 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerDied","Data":"40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23"} Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.455485 5102 scope.go:117] "RemoveContainer" containerID="29e0249c7c0c051313cbe44743f17b3c90d5bbfc2b33da35a85c08ab5fc94c4a" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.456449 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.456468 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.456465 5102 scope.go:117] "RemoveContainer" containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" Jan 23 06:58:40 crc kubenswrapper[5102]: E0123 06:58:40.457105 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:40 crc kubenswrapper[5102]: I0123 06:58:40.484392 5102 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6f4f2bfd-ad46-4b06-b053-c88bdcc65f53" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.386853 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerName="oauth-openshift" containerID="cri-o://cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384" gracePeriod=15 Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.465844 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/1.log" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.466179 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.466202 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.466898 5102 scope.go:117] "RemoveContainer" 
containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" Jan 23 06:58:41 crc kubenswrapper[5102]: E0123 06:58:41.467181 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.489135 5102 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="6f4f2bfd-ad46-4b06-b053-c88bdcc65f53" Jan 23 06:58:41 crc kubenswrapper[5102]: I0123 06:58:41.826246 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425130 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425257 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425285 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425308 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425336 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425359 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425401 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425419 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425506 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425568 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hkqvw\" (UniqueName: \"kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425589 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425617 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425636 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.425669 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig\") pod \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\" (UID: \"affdc918-cd11-4a65-8b67-0c4bc2bbadfc\") " Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.426431 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "audit-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.426749 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.426847 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.426841 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.427746 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.433130 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.433488 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.433697 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.433868 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.433994 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.434327 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.434593 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.434636 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw" (OuterVolumeSpecName: "kube-api-access-hkqvw") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "kube-api-access-hkqvw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.436685 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "affdc918-cd11-4a65-8b67-0c4bc2bbadfc" (UID: "affdc918-cd11-4a65-8b67-0c4bc2bbadfc"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.474294 5102 generic.go:334] "Generic (PLEG): container finished" podID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerID="cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384" exitCode=0 Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.474358 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.474359 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" event={"ID":"affdc918-cd11-4a65-8b67-0c4bc2bbadfc","Type":"ContainerDied","Data":"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384"} Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.474404 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-cwvr2" event={"ID":"affdc918-cd11-4a65-8b67-0c4bc2bbadfc","Type":"ContainerDied","Data":"d79afc7b483e629f12c0ea09a3d7add1c7d7f0a09ad305657637e7299f30d25b"} Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.474427 5102 scope.go:117] "RemoveContainer" containerID="cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.505399 5102 scope.go:117] "RemoveContainer" containerID="cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384" Jan 23 06:58:42 crc kubenswrapper[5102]: E0123 06:58:42.506436 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384\": container with ID starting with cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384 not found: ID does not exist" containerID="cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.506492 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384"} err="failed to get container status \"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384\": rpc error: code = NotFound desc = could not find container \"cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384\": container with ID starting with cf4f0723cf2a62f39a2368147ebcbeb299220ee850355ab90a779905041b7384 not found: ID does not exist" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527094 5102 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-policies\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527156 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527174 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527187 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hkqvw\" (UniqueName: \"kubernetes.io/projected/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-kube-api-access-hkqvw\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527198 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: 
\"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527209 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527220 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527235 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527247 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527257 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527268 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527278 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527287 5102 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:42 crc kubenswrapper[5102]: I0123 06:58:42.527297 5102 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/affdc918-cd11-4a65-8b67-0c4bc2bbadfc-audit-dir\") on node \"crc\" DevicePath \"\"" Jan 23 06:58:45 crc kubenswrapper[5102]: I0123 06:58:45.667671 5102 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 06:58:45 crc kubenswrapper[5102]: I0123 06:58:45.668257 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: 
connect: connection refused" Jan 23 06:58:48 crc kubenswrapper[5102]: I0123 06:58:48.933073 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:48 crc kubenswrapper[5102]: I0123 06:58:48.933534 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:48 crc kubenswrapper[5102]: I0123 06:58:48.934483 5102 scope.go:117] "RemoveContainer" containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" Jan 23 06:58:48 crc kubenswrapper[5102]: E0123 06:58:48.934733 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 10s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:49 crc kubenswrapper[5102]: I0123 06:58:49.539150 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Jan 23 06:58:50 crc kubenswrapper[5102]: I0123 06:58:50.339885 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 06:58:50 crc kubenswrapper[5102]: I0123 06:58:50.428837 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Jan 23 06:58:50 crc kubenswrapper[5102]: I0123 06:58:50.695123 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.026182 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.055011 5102 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.331346 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.349503 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.470305 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.488597 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.556016 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.631064 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.765260 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Jan 23 06:58:51 
crc kubenswrapper[5102]: I0123 06:58:51.831286 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.869089 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Jan 23 06:58:51 crc kubenswrapper[5102]: I0123 06:58:51.934811 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Jan 23 06:58:52 crc kubenswrapper[5102]: I0123 06:58:52.105443 5102 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Jan 23 06:58:52 crc kubenswrapper[5102]: I0123 06:58:52.629174 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Jan 23 06:58:52 crc kubenswrapper[5102]: I0123 06:58:52.776737 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.080205 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.097685 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.153327 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.403156 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.492176 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.656974 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.659333 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.689589 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.791816 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.800040 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.886384 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.937079 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Jan 23 06:58:53 crc kubenswrapper[5102]: I0123 06:58:53.951125 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 
06:58:54.086016 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.088244 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.119378 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.165791 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.272441 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.429374 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.442946 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.507786 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.509084 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.582143 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.595412 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.648004 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.722999 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.809802 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Jan 23 06:58:54 crc kubenswrapper[5102]: I0123 06:58:54.925071 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.104656 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.349034 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.459497 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.488009 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.490849 5102 
reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.666669 5102 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/kube-controller-manager namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" start-of-body= Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.666771 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" probeResult="failure" output="Get \"https://192.168.126.11:10257/healthz\": dial tcp 192.168.126.11:10257: connect: connection refused" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.666850 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.667729 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="kube-controller-manager" containerStatusID={"Type":"cri-o","ID":"90ce02475a921ac1cfa9d700c17285a4369c70616bc0b05519444b4064d7c3a6"} pod="openshift-kube-controller-manager/kube-controller-manager-crc" containerMessage="Container kube-controller-manager failed startup probe, will be restarted" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.667870 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="kube-controller-manager" containerID="cri-o://90ce02475a921ac1cfa9d700c17285a4369c70616bc0b05519444b4064d7c3a6" gracePeriod=30 Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.672744 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.682184 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.686237 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.819208 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.892762 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.944331 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.945141 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.970886 5102 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Jan 23 06:58:55 crc kubenswrapper[5102]: I0123 06:58:55.976619 5102 reflector.go:368] Caches populated 
for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.017301 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.091917 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.115650 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.121964 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.130449 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.138053 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.254753 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.279522 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.303231 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.344324 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.357000 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.533148 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.541120 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.625123 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.648724 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.850824 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.877001 5102 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.882747 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" 
pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-marketplace/redhat-marketplace-7h5pc","openshift-marketplace/redhat-marketplace-b5948","openshift-authentication/oauth-openshift-558db77b4-cwvr2","openshift-marketplace/redhat-operators-nwnmg","openshift-marketplace/redhat-operators-8hw9t","openshift-marketplace/marketplace-operator-79b997595-bwhrv","openshift-marketplace/community-operators-cxz8x","openshift-marketplace/certified-operators-sl6q6"] Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.882891 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.882925 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-wkhpj"] Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.883342 5102 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.883375 5102 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="8232b710-e236-49a9-9bfa-82ab28c7203c" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.884213 5102 scope.go:117] "RemoveContainer" containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.884249 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.889514 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.937968 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=17.937938643 podStartE2EDuration="17.937938643s" podCreationTimestamp="2026-01-23 06:58:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:58:56.911818401 +0000 UTC m=+287.732167376" watchObservedRunningTime="2026-01-23 06:58:56.937938643 +0000 UTC m=+287.758287618" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.940713 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Jan 23 06:58:56 crc kubenswrapper[5102]: I0123 06:58:56.993774 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.049392 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.093903 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.276758 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.302500 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.336602 
5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.387156 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.399585 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.433984 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.524275 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.585403 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/2.log" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.586641 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/1.log" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.586725 5102 generic.go:334] "Generic (PLEG): container finished" podID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" containerID="771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4" exitCode=1 Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.586827 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerDied","Data":"771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4"} Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.586937 5102 scope.go:117] "RemoveContainer" containerID="40e2957598955b436e95e60af6485518f582dc2df55d19fa61a01b2c5cdaea23" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.601891 5102 scope.go:117] "RemoveContainer" containerID="771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4" Jan 23 06:58:57 crc kubenswrapper[5102]: E0123 06:58:57.602906 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.613050 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.615815 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b589483-946f-4931-8bae-7b38e37682b3" path="/var/lib/kubelet/pods/0b589483-946f-4931-8bae-7b38e37682b3/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.616604 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" path="/var/lib/kubelet/pods/1b442d58-fe80-472a-a33e-ec4e15eadd8c/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.617271 5102 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" path="/var/lib/kubelet/pods/2e9105da-eb2c-4ead-96d7-2ca6c190ef0d/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.618676 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" path="/var/lib/kubelet/pods/45d96a0d-f31b-4afb-a528-58d7bbe8fe1b/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.619300 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" path="/var/lib/kubelet/pods/a67b60a8-bbb4-471d-a0d5-da47ec4819d2/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.621015 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" path="/var/lib/kubelet/pods/affdc918-cd11-4a65-8b67-0c4bc2bbadfc/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.622967 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" path="/var/lib/kubelet/pods/cda46479-cb25-47ec-8de7-31c9d6e22960/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.623576 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" path="/var/lib/kubelet/pods/f7bf48e9-2a76-4bc6-ab13-0fe41c736aae/volumes" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.681447 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.692120 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.721758 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.809069 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.835904 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.936842 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.936851 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.937643 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.938915 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.954135 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.964694 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.967827 5102 reflector.go:368] Caches populated for *v1.ConfigMap 
from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Jan 23 06:58:57 crc kubenswrapper[5102]: I0123 06:58:57.988934 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.019130 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.039664 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.099122 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.147925 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.152815 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.170839 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.290996 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.323897 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.383426 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.385474 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.409446 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.431741 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.481932 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.564806 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.567682 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.595201 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/2.log" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.650497 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.682355 5102 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.717290 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.745776 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.841637 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.843757 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.914399 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.932551 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.932618 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.933384 5102 scope.go:117] "RemoveContainer" containerID="771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4" Jan 23 06:58:58 crc kubenswrapper[5102]: E0123 06:58:58.933648 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f" Jan 23 06:58:58 crc kubenswrapper[5102]: I0123 06:58:58.947150 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.004440 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.049299 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.107890 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.199598 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.225407 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.246555 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.284296 5102 reflector.go:368] Caches populated for *v1.Secret from 
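The E0123 pod_workers.go:1301 record above is kubelet's CrashLoopBackOff gate in action: the liveness probe went unhealthy, the dead container was removed, and the restart is deferred by a 20s back-off. Kubelet's documented restart back-off starts at 10s and doubles per consecutive failure up to a 5m cap, so "back-off 20s" corresponds to the second consecutive crash. A sketch of that schedule (the constants match the documented defaults; the function name is ours):

    package main

    import (
        "fmt"
        "time"
    )

    // crashLoopDelay returns the back-off before restart attempt n (0-based),
    // assuming the well-known 10s-doubling-capped-at-5m kubelet policy.
    func crashLoopDelay(consecutiveFailures int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < consecutiveFailures; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute
            }
        }
        return d
    }

    func main() {
        for n := 0; n < 6; n++ {
            fmt.Printf("failure %d -> back-off %s\n", n, crashLoopDelay(n))
        }
        // failure 1 -> back-off 20s, matching the record above.
    }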
object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.395001 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.428358 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.452557 5102 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.460257 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.570013 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.588273 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.702308 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.748718 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.798232 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.843170 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.861075 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Jan 23 06:58:59 crc kubenswrapper[5102]: I0123 06:58:59.934036 5102 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.018883 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.107111 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.152166 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.250449 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.271840 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.304725 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 
06:59:00.330511 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.371610 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.411027 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.453993 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.609132 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.619261 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.642842 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.708656 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.803218 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.868084 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.958022 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Jan 23 06:59:00 crc kubenswrapper[5102]: I0123 06:59:00.994598 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.002938 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.004923 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.012814 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.049868 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.115250 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.118611 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.191970 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Jan 23 
06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.259302 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.288896 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.333197 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.409294 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.583351 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.723586 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.731378 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.808351 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.832183 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.865028 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.916385 5102 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.917141 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://a18e019c4fc8e7ba391f1d1048ad1d0684b9d6acc91954457f54a8200f09685b" gracePeriod=5 Jan 23 06:59:01 crc kubenswrapper[5102]: I0123 06:59:01.920939 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.054267 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.115670 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.116331 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.143509 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.212159 5102 reflector.go:368] Caches populated for *v1.Secret from 
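Each reflector.go:368 "Caches populated" record above marks a client-go reflector finishing its initial LIST for one per-namespace Secret or ConfigMap informer that kubelet holds on behalf of pod volumes. The standard consumer-side pattern is to start the informer factory and block on cache sync before reading; a minimal client-go example using the real informer API (kubeconfig path and namespace chosen for illustration):

    package main

    import (
        "time"

        "k8s.io/client-go/informers"
        "k8s.io/client-go/kubernetes"
        "k8s.io/client-go/tools/cache"
        "k8s.io/client-go/tools/clientcmd"
    )

    func main() {
        cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
        if err != nil {
            panic(err)
        }
        client := kubernetes.NewForConfigOrDie(cfg)
        // One factory scoped to a namespace, like the object-"ns"/"name" records above.
        factory := informers.NewSharedInformerFactoryWithOptions(
            client, 10*time.Minute, informers.WithNamespace("openshift-authentication"))
        cmInformer := factory.Core().V1().ConfigMaps().Informer()
        stop := make(chan struct{})
        defer close(stop)
        factory.Start(stop)
        // The equivalent of "Caches populated": block until the initial LIST lands.
        if !cache.WaitForCacheSync(stop, cmInformer.HasSynced) {
            panic("cache never synced")
        }
    }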
object-"openshift-ingress"/"router-certs-default" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.221615 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.229943 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.287220 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.547659 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.782097 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.827980 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.884016 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.885016 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.907504 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Jan 23 06:59:02 crc kubenswrapper[5102]: I0123 06:59:02.937700 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.017415 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.053568 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.094231 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5"] Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095198 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.095275 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095378 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.095456 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095519 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="extract-content" Jan 23 06:59:03 crc 
kubenswrapper[5102]: I0123 06:59:03.095615 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095723 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.095786 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095848 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" containerName="installer" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.095918 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" containerName="installer" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.095982 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096042 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096107 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096167 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096229 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096284 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096343 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096406 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096475 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096531 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096659 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096713 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096783 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" 
containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096841 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.096907 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.096964 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097025 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerName="oauth-openshift" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097079 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerName="oauth-openshift" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097139 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097201 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097260 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097317 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097379 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097440 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097493 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097564 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097624 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097677 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097746 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097811 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="extract-utilities" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097869 5102 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.097935 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: E0123 06:59:03.097994 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098053 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="extract-content" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098233 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0b589483-946f-4931-8bae-7b38e37682b3" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098315 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098377 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="c50c373e-478c-4132-9fe4-883f61e7e308" containerName="installer" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098447 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="cda46479-cb25-47ec-8de7-31c9d6e22960" containerName="marketplace-operator" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098506 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f7bf48e9-2a76-4bc6-ab13-0fe41c736aae" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098590 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b442d58-fe80-472a-a33e-ec4e15eadd8c" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098659 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="45d96a0d-f31b-4afb-a528-58d7bbe8fe1b" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098730 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a67b60a8-bbb4-471d-a0d5-da47ec4819d2" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.098815 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="affdc918-cd11-4a65-8b67-0c4bc2bbadfc" containerName="oauth-openshift" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.099061 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2e9105da-eb2c-4ead-96d7-2ca6c190ef0d" containerName="registry-server" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.099882 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.102430 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.103554 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105042 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105140 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105159 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105172 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105383 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.105793 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.108503 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.108747 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.108779 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.109097 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.115378 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.119516 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.120196 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5"] Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.124820 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.127629 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.133976 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: 
\"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134069 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-router-certs\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134111 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-login\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134186 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-error\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134215 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134241 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-session\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134274 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.134327 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: 
I0123 06:59:03.137009 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sfwk9\" (UniqueName: \"kubernetes.io/projected/3164db90-6997-425e-aff7-c981f0008a01-kube-api-access-sfwk9\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.137171 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-service-ca\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.140836 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.140934 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3164db90-6997-425e-aff7-c981f0008a01-audit-dir\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.141011 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-audit-policies\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.141070 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.210836 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.244052 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.242522 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245059 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245148 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-router-certs\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245183 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-login\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245222 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-error\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245284 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245322 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-session\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245409 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245477 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245558 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sfwk9\" (UniqueName: \"kubernetes.io/projected/3164db90-6997-425e-aff7-c981f0008a01-kube-api-access-sfwk9\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245599 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-service-ca\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245690 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245749 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3164db90-6997-425e-aff7-c981f0008a01-audit-dir\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.245833 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-audit-policies\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.246855 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-cliconfig\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.247165 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.247217 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-service-ca\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.247280 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/3164db90-6997-425e-aff7-c981f0008a01-audit-dir\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.247748 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/3164db90-6997-425e-aff7-c981f0008a01-audit-policies\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.251971 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.252068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-router-certs\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.252163 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-session\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.254128 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.256360 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.257348 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-system-serving-cert\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.257483 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" 
(UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-login\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.262046 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/3164db90-6997-425e-aff7-c981f0008a01-v4-0-config-user-template-error\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.270908 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sfwk9\" (UniqueName: \"kubernetes.io/projected/3164db90-6997-425e-aff7-c981f0008a01-kube-api-access-sfwk9\") pod \"oauth-openshift-64c9cdcbb9-vzsm5\" (UID: \"3164db90-6997-425e-aff7-c981f0008a01\") " pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.314603 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.426572 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.502579 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.612620 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.782945 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.797200 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.893784 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5"] Jan 23 06:59:03 crc kubenswrapper[5102]: I0123 06:59:03.947917 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.080221 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.314710 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.430049 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.542032 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.638077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" 
event={"ID":"3164db90-6997-425e-aff7-c981f0008a01","Type":"ContainerStarted","Data":"5df4d348388ced96bc5a54b2850543717284a11053dc1da872f78e7968d3981b"} Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.638146 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" event={"ID":"3164db90-6997-425e-aff7-c981f0008a01","Type":"ContainerStarted","Data":"021ef206b4db1582b666fbd4c575f012f0bfbc841554667c8a922a98bde2fd08"} Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.638391 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.661300 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" podStartSLOduration=48.661272163 podStartE2EDuration="48.661272163s" podCreationTimestamp="2026-01-23 06:58:16 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:59:04.660580401 +0000 UTC m=+295.480929386" watchObservedRunningTime="2026-01-23 06:59:04.661272163 +0000 UTC m=+295.481621138" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.669268 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.676963 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-64c9cdcbb9-vzsm5" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.715836 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.753888 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.845400 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.846759 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.877832 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.958844 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.959079 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.985152 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Jan 23 06:59:04 crc kubenswrapper[5102]: I0123 06:59:04.994078 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.227381 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Jan 23 06:59:05 
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.269860 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle"
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.342462 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt"
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.343722 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls"
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.677713 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt"
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.736429 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt"
Jan 23 06:59:05 crc kubenswrapper[5102]: I0123 06:59:05.854352 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle"
Jan 23 06:59:06 crc kubenswrapper[5102]: I0123 06:59:06.268684 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret"
Jan 23 06:59:06 crc kubenswrapper[5102]: I0123 06:59:06.619999 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt"
Jan 23 06:59:06 crc kubenswrapper[5102]: I0123 06:59:06.966418 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert"
Jan 23 06:59:07 crc kubenswrapper[5102]: I0123 06:59:07.654427 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx"
Jan 23 06:59:07 crc kubenswrapper[5102]: I0123 06:59:07.658747 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 23 06:59:07 crc kubenswrapper[5102]: I0123 06:59:07.658814 5102 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="a18e019c4fc8e7ba391f1d1048ad1d0684b9d6acc91954457f54a8200f09685b" exitCode=137
Jan 23 06:59:07 crc kubenswrapper[5102]: I0123 06:59:07.658875 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18b49c421010fac2d068c7a62163636cf9c69f0217a468c8917269aa67b22a0c"
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.257478 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log"
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.257643 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
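The generic.go:334 record closes out the startup-monitor container that was killed earlier with gracePeriod=5: exitCode=137 is the conventional wait-status encoding 128 + signal for SIGKILL (9), i.e. the container did not exit within its grace period and was hard-killed. The encoding, spelled out:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        // Wait-status convention: a signal-terminated process reports 128 + signo.
        fmt.Println(128 + int(syscall.SIGKILL)) // 137, the exitCode logged above
    }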
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.257643 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330297 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330522 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330592 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330619 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330644 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") "
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330695 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330783 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330811 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.330908 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.331220 5102 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.331247 5102 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.331256 5102 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.331265 5102 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.343943 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.432797 5102 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:08 crc kubenswrapper[5102]: I0123 06:59:08.665309 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"
Jan 23 06:59:09 crc kubenswrapper[5102]: I0123 06:59:09.449498 5102 cert_rotation.go:91] certificate rotation detected, shutting down client connections to start using new credentials
Jan 23 06:59:09 crc kubenswrapper[5102]: I0123 06:59:09.606301 5102 scope.go:117] "RemoveContainer" containerID="771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4"
Jan 23 06:59:09 crc kubenswrapper[5102]: E0123 06:59:09.607563 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"marketplace-operator\" with CrashLoopBackOff: \"back-off 20s restarting failed container=marketplace-operator pod=marketplace-operator-79b997595-wkhpj_openshift-marketplace(6273c4aa-9895-47ea-a3d6-9ac16123a30f)\"" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podUID="6273c4aa-9895-47ea-a3d6-9ac16123a30f"
Jan 23 06:59:09 crc kubenswrapper[5102]: I0123 06:59:09.618532 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes"
Jan 23 06:59:24 crc kubenswrapper[5102]: I0123 06:59:24.598339 5102 scope.go:117] "RemoveContainer" containerID="771036d76d0c850d6c9319a6d081e97af354190051af63ec4c294849662e6cd4"
Jan 23 06:59:24 crc kubenswrapper[5102]: I0123 06:59:24.772196 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/2.log"
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.780864 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/2.log"
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.781287 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" event={"ID":"6273c4aa-9895-47ea-a3d6-9ac16123a30f","Type":"ContainerStarted","Data":"2f729d8fbb7afc0f8df9147598e713be9cccf9c9042bf69e3323062ebe75d1a5"}
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.781775 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj"
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.783365 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log"
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.787466 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log"
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.787669 5102 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="90ce02475a921ac1cfa9d700c17285a4369c70616bc0b05519444b4064d7c3a6" exitCode=137
Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.788655 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj"
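The "back-off 20s restarting failed container" message above is the kubelet's per-container restart backoff, which doubles after each failed restart up to a cap. A sketch of the progression; the constants (10s initial, factor 2, 5m cap) are an assumption based on common kubelet defaults and may differ by version:

// backoff.go - illustrate the restart back-off progression implied by
// "back-off 20s restarting failed container". The constants are an
// assumption of typical kubelet defaults, not read from this cluster.
package main

import (
	"fmt"
	"time"
)

func main() {
	backoff := 10 * time.Second
	const maxBackoff = 5 * time.Minute
	for restart := 1; restart <= 7; restart++ {
		// "back-off 20s" in the log corresponds to restart 2 here.
		fmt.Printf("restart %d: back-off %s\n", restart, backoff)
		backoff *= 2
		if backoff > maxBackoff {
			backoff = maxBackoff
		}
	}
}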
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"90ce02475a921ac1cfa9d700c17285a4369c70616bc0b05519444b4064d7c3a6"} Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.788749 5102 scope.go:117] "RemoveContainer" containerID="477d9ade3489277d193c42b022759232d780a9fe15263be720b41769645c5c82" Jan 23 06:59:25 crc kubenswrapper[5102]: I0123 06:59:25.811355 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-wkhpj" podStartSLOduration=67.811322362 podStartE2EDuration="1m7.811322362s" podCreationTimestamp="2026-01-23 06:58:18 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:58:39.448441569 +0000 UTC m=+270.268790544" watchObservedRunningTime="2026-01-23 06:59:25.811322362 +0000 UTC m=+316.631671337" Jan 23 06:59:26 crc kubenswrapper[5102]: I0123 06:59:26.797898 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/1.log" Jan 23 06:59:26 crc kubenswrapper[5102]: I0123 06:59:26.800051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"da92fc1c08dcf816acfe1879f66d1a1739486f96b7c6090374f04cf79017c5a0"} Jan 23 06:59:35 crc kubenswrapper[5102]: I0123 06:59:35.666428 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:59:35 crc kubenswrapper[5102]: I0123 06:59:35.672402 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:59:35 crc kubenswrapper[5102]: I0123 06:59:35.863447 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:59:36 crc kubenswrapper[5102]: I0123 06:59:36.150063 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.184495 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"] Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.186439 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" podUID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" containerName="route-controller-manager" containerID="cri-o://963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690" gracePeriod=30 Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.235018 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"] Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.235749 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" podUID="07353947-651e-406f-8737-5ff7a2ef9cba" containerName="controller-manager" containerID="cri-o://8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518" gracePeriod=30 Jan 23 06:59:43 crc kubenswrapper[5102]: 
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.708993 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.754964 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert\") pod \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.755675 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config\") pod \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.755756 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca\") pod \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.755851 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jvx9j\" (UniqueName: \"kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j\") pod \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\" (UID: \"a1bb46fe-01d2-426a-977c-057b7efbc8c0\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.756800 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config" (OuterVolumeSpecName: "config") pod "a1bb46fe-01d2-426a-977c-057b7efbc8c0" (UID: "a1bb46fe-01d2-426a-977c-057b7efbc8c0"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.756932 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca" (OuterVolumeSpecName: "client-ca") pod "a1bb46fe-01d2-426a-977c-057b7efbc8c0" (UID: "a1bb46fe-01d2-426a-977c-057b7efbc8c0"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.762149 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "a1bb46fe-01d2-426a-977c-057b7efbc8c0" (UID: "a1bb46fe-01d2-426a-977c-057b7efbc8c0"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.762193 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j" (OuterVolumeSpecName: "kube-api-access-jvx9j") pod "a1bb46fe-01d2-426a-977c-057b7efbc8c0" (UID: "a1bb46fe-01d2-426a-977c-057b7efbc8c0"). InnerVolumeSpecName "kube-api-access-jvx9j". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.767050 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.858419 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9pf9t\" (UniqueName: \"kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t\") pod \"07353947-651e-406f-8737-5ff7a2ef9cba\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.860234 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert\") pod \"07353947-651e-406f-8737-5ff7a2ef9cba\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.860403 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca\") pod \"07353947-651e-406f-8737-5ff7a2ef9cba\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.860625 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles\") pod \"07353947-651e-406f-8737-5ff7a2ef9cba\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.860752 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config\") pod \"07353947-651e-406f-8737-5ff7a2ef9cba\" (UID: \"07353947-651e-406f-8737-5ff7a2ef9cba\") "
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861336 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-config\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861462 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/a1bb46fe-01d2-426a-977c-057b7efbc8c0-client-ca\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861604 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jvx9j\" (UniqueName: \"kubernetes.io/projected/a1bb46fe-01d2-426a-977c-057b7efbc8c0-kube-api-access-jvx9j\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861737 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1bb46fe-01d2-426a-977c-057b7efbc8c0-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861555 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca" (OuterVolumeSpecName: "client-ca") pod "07353947-651e-406f-8737-5ff7a2ef9cba" (UID: "07353947-651e-406f-8737-5ff7a2ef9cba"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861699 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "07353947-651e-406f-8737-5ff7a2ef9cba" (UID: "07353947-651e-406f-8737-5ff7a2ef9cba"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.861888 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config" (OuterVolumeSpecName: "config") pod "07353947-651e-406f-8737-5ff7a2ef9cba" (UID: "07353947-651e-406f-8737-5ff7a2ef9cba"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.862614 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t" (OuterVolumeSpecName: "kube-api-access-9pf9t") pod "07353947-651e-406f-8737-5ff7a2ef9cba" (UID: "07353947-651e-406f-8737-5ff7a2ef9cba"). InnerVolumeSpecName "kube-api-access-9pf9t". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.864236 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "07353947-651e-406f-8737-5ff7a2ef9cba" (UID: "07353947-651e-406f-8737-5ff7a2ef9cba"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.915161 5102 generic.go:334] "Generic (PLEG): container finished" podID="07353947-651e-406f-8737-5ff7a2ef9cba" containerID="8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518" exitCode=0
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.915337 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" event={"ID":"07353947-651e-406f-8737-5ff7a2ef9cba","Type":"ContainerDied","Data":"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"}
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.915378 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll" event={"ID":"07353947-651e-406f-8737-5ff7a2ef9cba","Type":"ContainerDied","Data":"3092b7ab962b19865a9700be9a70dba415bf558ba31f438598ca5079e2411ffd"}
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.915399 5102 scope.go:117] "RemoveContainer" containerID="8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.916088 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-649fffcbbd-mbdll"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.919078 5102 generic.go:334] "Generic (PLEG): container finished" podID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" containerID="963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690" exitCode=0
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.919119 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" event={"ID":"a1bb46fe-01d2-426a-977c-057b7efbc8c0","Type":"ContainerDied","Data":"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"}
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.919170 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.919159 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk" event={"ID":"a1bb46fe-01d2-426a-977c-057b7efbc8c0","Type":"ContainerDied","Data":"34de7c971e316227a662b88f3d86bbed9c18a1fb1cd0ebae195496c1404a0ff0"}
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.942231 5102 scope.go:117] "RemoveContainer" containerID="8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"
Jan 23 06:59:43 crc kubenswrapper[5102]: E0123 06:59:43.943103 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518\": container with ID starting with 8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518 not found: ID does not exist" containerID="8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.943142 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518"} err="failed to get container status \"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518\": rpc error: code = NotFound desc = could not find container \"8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518\": container with ID starting with 8675c6097b2bc142a4278cfde0d7e0287c0536ea44521db58152eef2d1039518 not found: ID does not exist"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.943193 5102 scope.go:117] "RemoveContainer" containerID="963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.952694 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"]
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.955778 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7df6fd7b5b-fl6wk"]
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.963062 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9pf9t\" (UniqueName: \"kubernetes.io/projected/07353947-651e-406f-8737-5ff7a2ef9cba-kube-api-access-9pf9t\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.963093 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/07353947-651e-406f-8737-5ff7a2ef9cba-serving-cert\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.963108 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-client-ca\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.963120 5102 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-proxy-ca-bundles\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.963133 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/07353947-651e-406f-8737-5ff7a2ef9cba-config\") on node \"crc\" DevicePath \"\""
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.972345 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"]
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.972726 5102 scope.go:117] "RemoveContainer" containerID="963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"
Jan 23 06:59:43 crc kubenswrapper[5102]: E0123 06:59:43.973308 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690\": container with ID starting with 963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690 not found: ID does not exist" containerID="963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.973352 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690"} err="failed to get container status \"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690\": rpc error: code = NotFound desc = could not find container \"963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690\": container with ID starting with 963a4cebe372789707536439777f9bf4f07e74859ae95e3b94126e338ff03690 not found: ID does not exist"
Jan 23 06:59:43 crc kubenswrapper[5102]: I0123 06:59:43.975773 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-649fffcbbd-mbdll"]
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.124349 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"]
Jan 23 06:59:45 crc kubenswrapper[5102]: E0123 06:59:45.125263 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="07353947-651e-406f-8737-5ff7a2ef9cba" containerName="controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.125283 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="07353947-651e-406f-8737-5ff7a2ef9cba" containerName="controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: E0123 06:59:45.125305 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" containerName="route-controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.125314 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" containerName="route-controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.125459 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" containerName="route-controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.125488 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="07353947-651e-406f-8737-5ff7a2ef9cba" containerName="controller-manager"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.126335 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.128137 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"]
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.128967 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133023 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133127 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133307 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133349 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133393 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133573 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133622 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.133739 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.134088 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.138031 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.138043 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.138241 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.143074 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.148987 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"]
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.157831 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"]
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.180835 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/734e7bac-8fe1-4a79-a4e3-35591146c2a5-serving-cert\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.180922 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.180957 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-client-ca\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.180990 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r95p2\" (UniqueName: \"kubernetes.io/projected/734e7bac-8fe1-4a79-a4e3-35591146c2a5-kube-api-access-r95p2\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.181281 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qsnlr\" (UniqueName: \"kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.181370 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-proxy-ca-bundles\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.181601 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-config\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.181759 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.181841 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283317 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283375 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283414 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/734e7bac-8fe1-4a79-a4e3-35591146c2a5-serving-cert\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283458 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283476 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-client-ca\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283496 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r95p2\" (UniqueName: \"kubernetes.io/projected/734e7bac-8fe1-4a79-a4e3-35591146c2a5-kube-api-access-r95p2\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"
\"kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283563 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-proxy-ca-bundles\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.283606 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-config\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.284906 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.285774 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-client-ca\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.285933 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.285960 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-proxy-ca-bundles\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.286903 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/734e7bac-8fe1-4a79-a4e3-35591146c2a5-config\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.306604 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/734e7bac-8fe1-4a79-a4e3-35591146c2a5-serving-cert\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " 
pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.306604 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.318380 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qsnlr\" (UniqueName: \"kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr\") pod \"route-controller-manager-7447957dcb-wdsh4\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.319254 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r95p2\" (UniqueName: \"kubernetes.io/projected/734e7bac-8fe1-4a79-a4e3-35591146c2a5-kube-api-access-r95p2\") pod \"controller-manager-5774c8b4bb-pq4gr\" (UID: \"734e7bac-8fe1-4a79-a4e3-35591146c2a5\") " pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.449445 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.463092 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.611282 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07353947-651e-406f-8737-5ff7a2ef9cba" path="/var/lib/kubelet/pods/07353947-651e-406f-8737-5ff7a2ef9cba/volumes" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.612351 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1bb46fe-01d2-426a-977c-057b7efbc8c0" path="/var/lib/kubelet/pods/a1bb46fe-01d2-426a-977c-057b7efbc8c0/volumes" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.712652 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr"] Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.764343 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"] Jan 23 06:59:45 crc kubenswrapper[5102]: W0123 06:59:45.766974 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf2a15c59_1d14_41ff_842e_f2e3b7c7283d.slice/crio-46c4014b2d1f92b0f787a7ddd5e61c387cbf5dfb6b891ebe30653ffba8a7f749 WatchSource:0}: Error finding container 46c4014b2d1f92b0f787a7ddd5e61c387cbf5dfb6b891ebe30653ffba8a7f749: Status 404 returned error can't find the container with id 46c4014b2d1f92b0f787a7ddd5e61c387cbf5dfb6b891ebe30653ffba8a7f749 Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.935688 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" 
event={"ID":"f2a15c59-1d14-41ff-842e-f2e3b7c7283d","Type":"ContainerStarted","Data":"527e64dafb37c22e7714c2f0a955b5dd06cf69f342b2bbc559c8085ce7d99758"} Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.935751 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" event={"ID":"f2a15c59-1d14-41ff-842e-f2e3b7c7283d","Type":"ContainerStarted","Data":"46c4014b2d1f92b0f787a7ddd5e61c387cbf5dfb6b891ebe30653ffba8a7f749"} Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.935990 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.937520 5102 patch_prober.go:28] interesting pod/route-controller-manager-7447957dcb-wdsh4 container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" start-of-body= Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.937593 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.65:8443/healthz\": dial tcp 10.217.0.65:8443: connect: connection refused" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.937644 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" event={"ID":"734e7bac-8fe1-4a79-a4e3-35591146c2a5","Type":"ContainerStarted","Data":"99bf07a4f4c238b9311104dd6167cefa952e36c67e13f19f428704c18f150444"} Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.937710 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" event={"ID":"734e7bac-8fe1-4a79-a4e3-35591146c2a5","Type":"ContainerStarted","Data":"df9f1241aca59153a52d1677ceaf735f32986d05b4e52aec4dd2841a40ff09ad"} Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.937919 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.939741 5102 patch_prober.go:28] interesting pod/controller-manager-5774c8b4bb-pq4gr container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" start-of-body= Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.939805 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" podUID="734e7bac-8fe1-4a79-a4e3-35591146c2a5" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.64:8443/healthz\": dial tcp 10.217.0.64:8443: connect: connection refused" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.962147 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" podStartSLOduration=2.9621215850000002 podStartE2EDuration="2.962121585s" podCreationTimestamp="2026-01-23 06:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 
+0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:59:45.956285978 +0000 UTC m=+336.776634973" watchObservedRunningTime="2026-01-23 06:59:45.962121585 +0000 UTC m=+336.782470560" Jan 23 06:59:45 crc kubenswrapper[5102]: I0123 06:59:45.982588 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" podStartSLOduration=2.982515458 podStartE2EDuration="2.982515458s" podCreationTimestamp="2026-01-23 06:59:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 06:59:45.974257623 +0000 UTC m=+336.794606618" watchObservedRunningTime="2026-01-23 06:59:45.982515458 +0000 UTC m=+336.802864433" Jan 23 06:59:46 crc kubenswrapper[5102]: I0123 06:59:46.953641 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-5774c8b4bb-pq4gr" Jan 23 06:59:46 crc kubenswrapper[5102]: I0123 06:59:46.954475 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.597076 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-szq9g"] Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.600290 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.606129 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.611518 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-szq9g"] Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.650089 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5z6qg\" (UniqueName: \"kubernetes.io/projected/229c9d86-29ca-4872-8819-231653c292de-kube-api-access-5z6qg\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.650188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-catalog-content\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.650224 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-utilities\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.751889 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5z6qg\" (UniqueName: \"kubernetes.io/projected/229c9d86-29ca-4872-8819-231653c292de-kube-api-access-5z6qg\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " 
pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.751986 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-catalog-content\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.752022 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-utilities\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.752724 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-utilities\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.753228 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/229c9d86-29ca-4872-8819-231653c292de-catalog-content\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.777121 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5z6qg\" (UniqueName: \"kubernetes.io/projected/229c9d86-29ca-4872-8819-231653c292de-kube-api-access-5z6qg\") pod \"redhat-operators-szq9g\" (UID: \"229c9d86-29ca-4872-8819-231653c292de\") " pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.801219 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-czpf6"] Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.804503 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.807451 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.809080 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-czpf6"] Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.853207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-svqsz\" (UniqueName: \"kubernetes.io/projected/e2c8263d-fc51-48be-80e5-284ebef0b5e2-kube-api-access-svqsz\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.853312 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-utilities\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.853398 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-catalog-content\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.954797 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-catalog-content\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.954904 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-svqsz\" (UniqueName: \"kubernetes.io/projected/e2c8263d-fc51-48be-80e5-284ebef0b5e2-kube-api-access-svqsz\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.954952 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-utilities\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.955676 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-utilities\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.956060 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e2c8263d-fc51-48be-80e5-284ebef0b5e2-catalog-content\") pod \"community-operators-czpf6\" (UID: 
\"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.976273 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 06:59:55 crc kubenswrapper[5102]: I0123 06:59:55.977848 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-svqsz\" (UniqueName: \"kubernetes.io/projected/e2c8263d-fc51-48be-80e5-284ebef0b5e2-kube-api-access-svqsz\") pod \"community-operators-czpf6\" (UID: \"e2c8263d-fc51-48be-80e5-284ebef0b5e2\") " pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:56 crc kubenswrapper[5102]: I0123 06:59:56.134728 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-czpf6" Jan 23 06:59:56 crc kubenswrapper[5102]: I0123 06:59:56.401605 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-szq9g"] Jan 23 06:59:56 crc kubenswrapper[5102]: I0123 06:59:56.532428 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-czpf6"] Jan 23 06:59:56 crc kubenswrapper[5102]: W0123 06:59:56.535496 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pode2c8263d_fc51_48be_80e5_284ebef0b5e2.slice/crio-17c6615e0333c3a773cd9331b4bbd961b7c3424c6352cb983ce74b6cfb3cf2c7 WatchSource:0}: Error finding container 17c6615e0333c3a773cd9331b4bbd961b7c3424c6352cb983ce74b6cfb3cf2c7: Status 404 returned error can't find the container with id 17c6615e0333c3a773cd9331b4bbd961b7c3424c6352cb983ce74b6cfb3cf2c7 Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.027728 5102 generic.go:334] "Generic (PLEG): container finished" podID="229c9d86-29ca-4872-8819-231653c292de" containerID="2f8434709a0c16872e915dbdd26c191b6bdb4df49b60d5469b72f3948a65f6a7" exitCode=0 Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.027782 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-szq9g" event={"ID":"229c9d86-29ca-4872-8819-231653c292de","Type":"ContainerDied","Data":"2f8434709a0c16872e915dbdd26c191b6bdb4df49b60d5469b72f3948a65f6a7"} Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.027838 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-szq9g" event={"ID":"229c9d86-29ca-4872-8819-231653c292de","Type":"ContainerStarted","Data":"a647d70a9f69993bfd18149ac9d5f6d96a36d8a65d7c46618cec5c4ee9b3f140"} Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.029319 5102 generic.go:334] "Generic (PLEG): container finished" podID="e2c8263d-fc51-48be-80e5-284ebef0b5e2" containerID="327f595ad678df424dd0e2a120470cbb9b99eead8e86400e5a96bd048ee4ad1c" exitCode=0 Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.029376 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czpf6" event={"ID":"e2c8263d-fc51-48be-80e5-284ebef0b5e2","Type":"ContainerDied","Data":"327f595ad678df424dd0e2a120470cbb9b99eead8e86400e5a96bd048ee4ad1c"} Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.029401 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czpf6" 
event={"ID":"e2c8263d-fc51-48be-80e5-284ebef0b5e2","Type":"ContainerStarted","Data":"17c6615e0333c3a773cd9331b4bbd961b7c3424c6352cb983ce74b6cfb3cf2c7"} Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.995514 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sqprz"] Jan 23 06:59:57 crc kubenswrapper[5102]: I0123 06:59:57.997706 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.001383 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.012948 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sqprz"] Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.060592 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czpf6" event={"ID":"e2c8263d-fc51-48be-80e5-284ebef0b5e2","Type":"ContainerStarted","Data":"3d14391ec31d1ca33059f6ad235a13e19a018902990378bfb8101597f98dbd68"} Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.095860 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.095992 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.096051 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-54crz\" (UniqueName: \"kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.195509 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-92s74"] Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.197339 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.197420 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.197494 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.197524 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-54crz\" (UniqueName: \"kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.198253 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.198296 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.200718 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.210452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-92s74"] Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.228846 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-54crz\" (UniqueName: \"kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz\") pod \"certified-operators-sqprz\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") " pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.299247 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jcn2w\" (UniqueName: \"kubernetes.io/projected/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-kube-api-access-jcn2w\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.299313 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-catalog-content\") pod \"redhat-marketplace-92s74\" (UID: 
\"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.299366 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-utilities\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.376807 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.400703 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jcn2w\" (UniqueName: \"kubernetes.io/projected/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-kube-api-access-jcn2w\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.400788 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-catalog-content\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.400889 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-utilities\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.401596 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-utilities\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.402473 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-catalog-content\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.423460 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jcn2w\" (UniqueName: \"kubernetes.io/projected/e68a118c-0a6c-48fb-837a-b19bb6d00b0b-kube-api-access-jcn2w\") pod \"redhat-marketplace-92s74\" (UID: \"e68a118c-0a6c-48fb-837a-b19bb6d00b0b\") " pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.515628 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.812530 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sqprz"] Jan 23 06:59:58 crc kubenswrapper[5102]: W0123 06:59:58.816225 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2bce5a3d_171e_4219_87d8_827d2101389a.slice/crio-ecab60b4b8a1cfd596384b3abe2f1c84cdb87e4ccbcd9e3388c74c9fab229b1e WatchSource:0}: Error finding container ecab60b4b8a1cfd596384b3abe2f1c84cdb87e4ccbcd9e3388c74c9fab229b1e: Status 404 returned error can't find the container with id ecab60b4b8a1cfd596384b3abe2f1c84cdb87e4ccbcd9e3388c74c9fab229b1e Jan 23 06:59:58 crc kubenswrapper[5102]: I0123 06:59:58.949082 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-92s74"] Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.068351 5102 generic.go:334] "Generic (PLEG): container finished" podID="2bce5a3d-171e-4219-87d8-827d2101389a" containerID="e1e7c8880806729317b61cae37fea5fca60938dccc389f9310c328bc5a645716" exitCode=0 Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.068450 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerDied","Data":"e1e7c8880806729317b61cae37fea5fca60938dccc389f9310c328bc5a645716"} Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.068587 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerStarted","Data":"ecab60b4b8a1cfd596384b3abe2f1c84cdb87e4ccbcd9e3388c74c9fab229b1e"} Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.070771 5102 generic.go:334] "Generic (PLEG): container finished" podID="229c9d86-29ca-4872-8819-231653c292de" containerID="9363292443c5b3f12438076f9db2e9754c15ed197afce20c55679a5e8b4fc696" exitCode=0 Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.070842 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-szq9g" event={"ID":"229c9d86-29ca-4872-8819-231653c292de","Type":"ContainerDied","Data":"9363292443c5b3f12438076f9db2e9754c15ed197afce20c55679a5e8b4fc696"} Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.073152 5102 generic.go:334] "Generic (PLEG): container finished" podID="e2c8263d-fc51-48be-80e5-284ebef0b5e2" containerID="3d14391ec31d1ca33059f6ad235a13e19a018902990378bfb8101597f98dbd68" exitCode=0 Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.073214 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czpf6" event={"ID":"e2c8263d-fc51-48be-80e5-284ebef0b5e2","Type":"ContainerDied","Data":"3d14391ec31d1ca33059f6ad235a13e19a018902990378bfb8101597f98dbd68"} Jan 23 06:59:59 crc kubenswrapper[5102]: I0123 06:59:59.076057 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-92s74" event={"ID":"e68a118c-0a6c-48fb-837a-b19bb6d00b0b","Type":"ContainerStarted","Data":"7dea50f471d549ab0f881ad91a52561d371168c6dc254be7e46f619a7714aece"} Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.086000 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" 
event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerStarted","Data":"907f8e8469d7a7e48fa966065328c9745519be5d120e9aa3b3ef6d5474c7d8e2"} Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.088875 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-szq9g" event={"ID":"229c9d86-29ca-4872-8819-231653c292de","Type":"ContainerStarted","Data":"b89c3af61615f8a090f9f975f0403cbf57bc4eb34776c62fbd83f534ee2b3050"} Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.091652 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-czpf6" event={"ID":"e2c8263d-fc51-48be-80e5-284ebef0b5e2","Type":"ContainerStarted","Data":"85e389daf59b7d0423227ea8a9daca4ce9fbd5a614d08af702581beb2fa5d2c9"} Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.094000 5102 generic.go:334] "Generic (PLEG): container finished" podID="e68a118c-0a6c-48fb-837a-b19bb6d00b0b" containerID="0ad934f363cdf59e995baf3b589cb0a014186f19acec04c8e4d8c259a433567b" exitCode=0 Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.094089 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-92s74" event={"ID":"e68a118c-0a6c-48fb-837a-b19bb6d00b0b","Type":"ContainerDied","Data":"0ad934f363cdf59e995baf3b589cb0a014186f19acec04c8e4d8c259a433567b"} Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.140191 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-czpf6" podStartSLOduration=2.3864361499999998 podStartE2EDuration="5.140167202s" podCreationTimestamp="2026-01-23 06:59:55 +0000 UTC" firstStartedPulling="2026-01-23 06:59:57.031398824 +0000 UTC m=+347.851747799" lastFinishedPulling="2026-01-23 06:59:59.785129876 +0000 UTC m=+350.605478851" observedRunningTime="2026-01-23 07:00:00.134376796 +0000 UTC m=+350.954725801" watchObservedRunningTime="2026-01-23 07:00:00.140167202 +0000 UTC m=+350.960516177" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.188844 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-szq9g" podStartSLOduration=2.707697015 podStartE2EDuration="5.18882159s" podCreationTimestamp="2026-01-23 06:59:55 +0000 UTC" firstStartedPulling="2026-01-23 06:59:57.030923169 +0000 UTC m=+347.851272154" lastFinishedPulling="2026-01-23 06:59:59.512047754 +0000 UTC m=+350.332396729" observedRunningTime="2026-01-23 07:00:00.186222697 +0000 UTC m=+351.006571672" watchObservedRunningTime="2026-01-23 07:00:00.18882159 +0000 UTC m=+351.009170575" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.203470 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z"] Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.204404 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.207331 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.207436 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.221259 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z"] Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.330946 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zdbng\" (UniqueName: \"kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.331478 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.331584 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.433269 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.433317 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.433342 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zdbng\" (UniqueName: \"kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.434440 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume\") pod 
\"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.442338 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.450176 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zdbng\" (UniqueName: \"kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng\") pod \"collect-profiles-29485860-v568z\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.523013 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:00 crc kubenswrapper[5102]: I0123 07:00:00.963066 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z"] Jan 23 07:00:01 crc kubenswrapper[5102]: I0123 07:00:01.102699 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" event={"ID":"53cdfad3-0375-4019-bc82-0240a66527c6","Type":"ContainerStarted","Data":"eb98344b8f13debb855e89ba5caa3f1960f09e68e164e3d0c4661f2d23611f14"} Jan 23 07:00:01 crc kubenswrapper[5102]: I0123 07:00:01.107576 5102 generic.go:334] "Generic (PLEG): container finished" podID="2bce5a3d-171e-4219-87d8-827d2101389a" containerID="907f8e8469d7a7e48fa966065328c9745519be5d120e9aa3b3ef6d5474c7d8e2" exitCode=0 Jan 23 07:00:01 crc kubenswrapper[5102]: I0123 07:00:01.107970 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerDied","Data":"907f8e8469d7a7e48fa966065328c9745519be5d120e9aa3b3ef6d5474c7d8e2"} Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 07:00:02.118110 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerStarted","Data":"714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6"} Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 07:00:02.121615 5102 generic.go:334] "Generic (PLEG): container finished" podID="53cdfad3-0375-4019-bc82-0240a66527c6" containerID="26596e9af76163e8a38826b5156c15696fccd899d182d740d83a3abb6eea15f7" exitCode=0 Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 07:00:02.121700 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" event={"ID":"53cdfad3-0375-4019-bc82-0240a66527c6","Type":"ContainerDied","Data":"26596e9af76163e8a38826b5156c15696fccd899d182d740d83a3abb6eea15f7"} Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 07:00:02.123369 5102 generic.go:334] "Generic (PLEG): container finished" podID="e68a118c-0a6c-48fb-837a-b19bb6d00b0b" containerID="3af4065d7f6aea433fb15a715fd78585f6d1abb6d25fe20a5860aaf66a34e82f" exitCode=0 Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 
07:00:02.123402 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-92s74" event={"ID":"e68a118c-0a6c-48fb-837a-b19bb6d00b0b","Type":"ContainerDied","Data":"3af4065d7f6aea433fb15a715fd78585f6d1abb6d25fe20a5860aaf66a34e82f"} Jan 23 07:00:02 crc kubenswrapper[5102]: I0123 07:00:02.152396 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sqprz" podStartSLOduration=2.486105205 podStartE2EDuration="5.152354166s" podCreationTimestamp="2026-01-23 06:59:57 +0000 UTC" firstStartedPulling="2026-01-23 06:59:59.071404959 +0000 UTC m=+349.891753934" lastFinishedPulling="2026-01-23 07:00:01.73765391 +0000 UTC m=+352.558002895" observedRunningTime="2026-01-23 07:00:02.148124161 +0000 UTC m=+352.968473146" watchObservedRunningTime="2026-01-23 07:00:02.152354166 +0000 UTC m=+352.972703141" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.133009 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-92s74" event={"ID":"e68a118c-0a6c-48fb-837a-b19bb6d00b0b","Type":"ContainerStarted","Data":"a49d2a028fce7fed1e99701c7b20fbd5baa8e3b8f806410e99ba270b5928d253"} Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.563343 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.683168 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zdbng\" (UniqueName: \"kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng\") pod \"53cdfad3-0375-4019-bc82-0240a66527c6\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.683316 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume\") pod \"53cdfad3-0375-4019-bc82-0240a66527c6\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.683348 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume\") pod \"53cdfad3-0375-4019-bc82-0240a66527c6\" (UID: \"53cdfad3-0375-4019-bc82-0240a66527c6\") " Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.684396 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume" (OuterVolumeSpecName: "config-volume") pod "53cdfad3-0375-4019-bc82-0240a66527c6" (UID: "53cdfad3-0375-4019-bc82-0240a66527c6"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.692204 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "53cdfad3-0375-4019-bc82-0240a66527c6" (UID: "53cdfad3-0375-4019-bc82-0240a66527c6"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.692320 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng" (OuterVolumeSpecName: "kube-api-access-zdbng") pod "53cdfad3-0375-4019-bc82-0240a66527c6" (UID: "53cdfad3-0375-4019-bc82-0240a66527c6"). InnerVolumeSpecName "kube-api-access-zdbng". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.785422 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/53cdfad3-0375-4019-bc82-0240a66527c6-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.785478 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/53cdfad3-0375-4019-bc82-0240a66527c6-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:03 crc kubenswrapper[5102]: I0123 07:00:03.785489 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zdbng\" (UniqueName: \"kubernetes.io/projected/53cdfad3-0375-4019-bc82-0240a66527c6-kube-api-access-zdbng\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:04 crc kubenswrapper[5102]: I0123 07:00:04.146521 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" Jan 23 07:00:04 crc kubenswrapper[5102]: I0123 07:00:04.147077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z" event={"ID":"53cdfad3-0375-4019-bc82-0240a66527c6","Type":"ContainerDied","Data":"eb98344b8f13debb855e89ba5caa3f1960f09e68e164e3d0c4661f2d23611f14"} Jan 23 07:00:04 crc kubenswrapper[5102]: I0123 07:00:04.147115 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eb98344b8f13debb855e89ba5caa3f1960f09e68e164e3d0c4661f2d23611f14" Jan 23 07:00:04 crc kubenswrapper[5102]: I0123 07:00:04.176514 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-92s74" podStartSLOduration=3.560556771 podStartE2EDuration="6.176486392s" podCreationTimestamp="2026-01-23 06:59:58 +0000 UTC" firstStartedPulling="2026-01-23 07:00:00.095914895 +0000 UTC m=+350.916263870" lastFinishedPulling="2026-01-23 07:00:02.711844516 +0000 UTC m=+353.532193491" observedRunningTime="2026-01-23 07:00:04.16921253 +0000 UTC m=+354.989561515" watchObservedRunningTime="2026-01-23 07:00:04.176486392 +0000 UTC m=+354.996835367" Jan 23 07:00:05 crc kubenswrapper[5102]: I0123 07:00:05.977137 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 07:00:05 crc kubenswrapper[5102]: I0123 07:00:05.977205 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.020314 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.135967 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-czpf6" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.136594 
5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-czpf6" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.180992 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-czpf6" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.210803 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-szq9g" Jan 23 07:00:06 crc kubenswrapper[5102]: I0123 07:00:06.226273 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-czpf6" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.238778 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-cwpzg"] Jan 23 07:00:08 crc kubenswrapper[5102]: E0123 07:00:08.239168 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53cdfad3-0375-4019-bc82-0240a66527c6" containerName="collect-profiles" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.239191 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="53cdfad3-0375-4019-bc82-0240a66527c6" containerName="collect-profiles" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.239344 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="53cdfad3-0375-4019-bc82-0240a66527c6" containerName="collect-profiles" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.240167 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.264229 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-cwpzg"] Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363722 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqvhz\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-kube-api-access-pqvhz\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363805 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-tls\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363846 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363872 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/083d84a9-c763-49e9-90c2-19c21b0a4465-installation-pull-secrets\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: 
\"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363895 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-bound-sa-token\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363915 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-certificates\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363935 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-trusted-ca\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.363967 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/083d84a9-c763-49e9-90c2-19c21b0a4465-ca-trust-extracted\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.377080 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.379132 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.395020 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.424859 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.465964 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqvhz\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-kube-api-access-pqvhz\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466029 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-tls\") pod 
\"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466074 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/083d84a9-c763-49e9-90c2-19c21b0a4465-installation-pull-secrets\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466111 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-bound-sa-token\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466135 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-certificates\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466155 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-trusted-ca\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.466185 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/083d84a9-c763-49e9-90c2-19c21b0a4465-ca-trust-extracted\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.467242 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/083d84a9-c763-49e9-90c2-19c21b0a4465-ca-trust-extracted\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.468207 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-certificates\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.469623 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/083d84a9-c763-49e9-90c2-19c21b0a4465-trusted-ca\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.475186 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-registry-tls\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.479747 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/083d84a9-c763-49e9-90c2-19c21b0a4465-installation-pull-secrets\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.482860 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqvhz\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-kube-api-access-pqvhz\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.483747 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/083d84a9-c763-49e9-90c2-19c21b0a4465-bound-sa-token\") pod \"image-registry-66df7c8f76-cwpzg\" (UID: \"083d84a9-c763-49e9-90c2-19c21b0a4465\") " pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.516637 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.517273 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.562941 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.564946 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 07:00:08 crc kubenswrapper[5102]: I0123 07:00:08.992202 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-cwpzg"] Jan 23 07:00:09 crc kubenswrapper[5102]: I0123 07:00:09.183225 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" event={"ID":"083d84a9-c763-49e9-90c2-19c21b0a4465","Type":"ContainerStarted","Data":"b2206f812dfd3cf63c8716d071e7e74d5382e6ee2c4164630710f3383064e544"} Jan 23 07:00:09 crc kubenswrapper[5102]: I0123 07:00:09.229368 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-92s74" Jan 23 07:00:09 crc kubenswrapper[5102]: I0123 07:00:09.229814 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sqprz" Jan 23 07:00:13 crc kubenswrapper[5102]: I0123 07:00:13.209503 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" event={"ID":"083d84a9-c763-49e9-90c2-19c21b0a4465","Type":"ContainerStarted","Data":"981a871d6b71f92cb1aaf6718f202e93500c1d7f36181b478c7c943bc265a107"} Jan 23 07:00:13 crc kubenswrapper[5102]: I0123 07:00:13.210034 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:13 crc kubenswrapper[5102]: I0123 07:00:13.234876 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" podStartSLOduration=5.23485154 podStartE2EDuration="5.23485154s" podCreationTimestamp="2026-01-23 07:00:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:00:13.233180006 +0000 UTC m=+364.053529001" watchObservedRunningTime="2026-01-23 07:00:13.23485154 +0000 UTC m=+364.055200515" Jan 23 07:00:16 crc kubenswrapper[5102]: I0123 07:00:16.768641 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:00:16 crc kubenswrapper[5102]: I0123 07:00:16.769488 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:00:28 crc kubenswrapper[5102]: I0123 07:00:28.570582 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-cwpzg" Jan 23 07:00:28 crc kubenswrapper[5102]: I0123 07:00:28.641385 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.249559 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"] Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.252386 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerName="route-controller-manager" containerID="cri-o://527e64dafb37c22e7714c2f0a955b5dd06cf69f342b2bbc559c8085ce7d99758" gracePeriod=30 Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.410777 5102 generic.go:334] "Generic (PLEG): container finished" podID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerID="527e64dafb37c22e7714c2f0a955b5dd06cf69f342b2bbc559c8085ce7d99758" exitCode=0 Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.410848 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" event={"ID":"f2a15c59-1d14-41ff-842e-f2e3b7c7283d","Type":"ContainerDied","Data":"527e64dafb37c22e7714c2f0a955b5dd06cf69f342b2bbc559c8085ce7d99758"} Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.652947 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.804841 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config\") pod \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.805019 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca\") pod \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.805107 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert\") pod \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.805171 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qsnlr\" (UniqueName: \"kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr\") pod \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\" (UID: \"f2a15c59-1d14-41ff-842e-f2e3b7c7283d\") " Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.805965 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca" (OuterVolumeSpecName: "client-ca") pod "f2a15c59-1d14-41ff-842e-f2e3b7c7283d" (UID: "f2a15c59-1d14-41ff-842e-f2e3b7c7283d"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.806020 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config" (OuterVolumeSpecName: "config") pod "f2a15c59-1d14-41ff-842e-f2e3b7c7283d" (UID: "f2a15c59-1d14-41ff-842e-f2e3b7c7283d"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.812007 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "f2a15c59-1d14-41ff-842e-f2e3b7c7283d" (UID: "f2a15c59-1d14-41ff-842e-f2e3b7c7283d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.812932 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr" (OuterVolumeSpecName: "kube-api-access-qsnlr") pod "f2a15c59-1d14-41ff-842e-f2e3b7c7283d" (UID: "f2a15c59-1d14-41ff-842e-f2e3b7c7283d"). InnerVolumeSpecName "kube-api-access-qsnlr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.907417 5102 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-client-ca\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.908163 5102 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.908205 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qsnlr\" (UniqueName: \"kubernetes.io/projected/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-kube-api-access-qsnlr\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:40 crc kubenswrapper[5102]: I0123 07:00:40.908228 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f2a15c59-1d14-41ff-842e-f2e3b7c7283d-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.419787 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" event={"ID":"f2a15c59-1d14-41ff-842e-f2e3b7c7283d","Type":"ContainerDied","Data":"46c4014b2d1f92b0f787a7ddd5e61c387cbf5dfb6b891ebe30653ffba8a7f749"} Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.421723 5102 scope.go:117] "RemoveContainer" containerID="527e64dafb37c22e7714c2f0a955b5dd06cf69f342b2bbc559c8085ce7d99758" Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.420011 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4" Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.465194 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"] Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.472102 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-7447957dcb-wdsh4"] Jan 23 07:00:41 crc kubenswrapper[5102]: I0123 07:00:41.607268 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" path="/var/lib/kubelet/pods/f2a15c59-1d14-41ff-842e-f2e3b7c7283d/volumes" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.175822 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn"] Jan 23 07:00:42 crc kubenswrapper[5102]: E0123 07:00:42.176275 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerName="route-controller-manager" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.176298 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerName="route-controller-manager" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.176577 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2a15c59-1d14-41ff-842e-f2e3b7c7283d" containerName="route-controller-manager" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.177949 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.182175 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.185588 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.185947 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.186208 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.186487 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.186574 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.193820 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn"] Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.340035 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-client-ca\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " 
pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.340162 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gg8pv\" (UniqueName: \"kubernetes.io/projected/be578f91-cc11-4789-9310-ca9398dbc590-kube-api-access-gg8pv\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.340391 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be578f91-cc11-4789-9310-ca9398dbc590-serving-cert\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.340641 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-config\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.442759 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-client-ca\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.442839 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gg8pv\" (UniqueName: \"kubernetes.io/projected/be578f91-cc11-4789-9310-ca9398dbc590-kube-api-access-gg8pv\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.442868 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be578f91-cc11-4789-9310-ca9398dbc590-serving-cert\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.442901 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-config\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.444816 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-config\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " 
pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.444855 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/be578f91-cc11-4789-9310-ca9398dbc590-client-ca\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.450437 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/be578f91-cc11-4789-9310-ca9398dbc590-serving-cert\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.467981 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gg8pv\" (UniqueName: \"kubernetes.io/projected/be578f91-cc11-4789-9310-ca9398dbc590-kube-api-access-gg8pv\") pod \"route-controller-manager-665bfbf858-p4cnn\" (UID: \"be578f91-cc11-4789-9310-ca9398dbc590\") " pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.564355 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:42 crc kubenswrapper[5102]: I0123 07:00:42.799452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn"] Jan 23 07:00:43 crc kubenswrapper[5102]: I0123 07:00:43.438800 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" event={"ID":"be578f91-cc11-4789-9310-ca9398dbc590","Type":"ContainerStarted","Data":"e8e194c413568d6be0b23becaa742f604c0d83389ffbbb78091a3932c8d4286e"} Jan 23 07:00:43 crc kubenswrapper[5102]: I0123 07:00:43.438858 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" event={"ID":"be578f91-cc11-4789-9310-ca9398dbc590","Type":"ContainerStarted","Data":"ba3bdea9f06126e415ec6d09e01ceb3c47309f61b8cf4ffd07af6a46a4020144"} Jan 23 07:00:43 crc kubenswrapper[5102]: I0123 07:00:43.439135 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:43 crc kubenswrapper[5102]: I0123 07:00:43.445421 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" Jan 23 07:00:43 crc kubenswrapper[5102]: I0123 07:00:43.474019 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-665bfbf858-p4cnn" podStartSLOduration=3.473990769 podStartE2EDuration="3.473990769s" podCreationTimestamp="2026-01-23 07:00:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:00:43.46432029 +0000 UTC m=+394.284669275" watchObservedRunningTime="2026-01-23 07:00:43.473990769 +0000 UTC m=+394.294339744" Jan 23 
Jan 23 07:00:46 crc kubenswrapper[5102]: I0123 07:00:46.768735 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:00:46 crc kubenswrapper[5102]: I0123 07:00:46.769887 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:00:53 crc kubenswrapper[5102]: I0123 07:00:53.682745 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" podUID="25efae93-08ee-4c4a-88db-3faa88559398" containerName="registry" containerID="cri-o://b55105cf116daf83d3792a8e4a87e9cdbf81c6d461e2376dba87eb8a97a7f113" gracePeriod=30
Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.526665 5102 generic.go:334] "Generic (PLEG): container finished" podID="25efae93-08ee-4c4a-88db-3faa88559398" containerID="b55105cf116daf83d3792a8e4a87e9cdbf81c6d461e2376dba87eb8a97a7f113" exitCode=0
Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.526861 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" event={"ID":"25efae93-08ee-4c4a-88db-3faa88559398","Type":"ContainerDied","Data":"b55105cf116daf83d3792a8e4a87e9cdbf81c6d461e2376dba87eb8a97a7f113"}
Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.605067 5102 util.go:48] "No ready sandbox for pod can be found.
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.766331 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.766907 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.766946 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.766967 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.767825 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.767879 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.767946 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hcm89\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.767988 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls\") pod \"25efae93-08ee-4c4a-88db-3faa88559398\" (UID: \"25efae93-08ee-4c4a-88db-3faa88559398\") " Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.769514 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.770109 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.775177 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.777044 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.777212 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.785736 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.789801 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89" (OuterVolumeSpecName: "kube-api-access-hcm89") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "kube-api-access-hcm89". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.810402 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "25efae93-08ee-4c4a-88db-3faa88559398" (UID: "25efae93-08ee-4c4a-88db-3faa88559398"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869287 5102 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/25efae93-08ee-4c4a-88db-3faa88559398-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869329 5102 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-bound-sa-token\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869342 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-trusted-ca\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869351 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hcm89\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-kube-api-access-hcm89\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869364 5102 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/25efae93-08ee-4c4a-88db-3faa88559398-registry-tls\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869373 5102 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/25efae93-08ee-4c4a-88db-3faa88559398-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:54 crc kubenswrapper[5102]: I0123 07:00:54.869382 5102 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/25efae93-08ee-4c4a-88db-3faa88559398-registry-certificates\") on node \"crc\" DevicePath \"\"" Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.536593 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" event={"ID":"25efae93-08ee-4c4a-88db-3faa88559398","Type":"ContainerDied","Data":"79cd91dc3c7ad61f39465edd4d205f17a820fd8ed9c526469c73cd41d6855945"} Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.536664 5102 scope.go:117] "RemoveContainer" containerID="b55105cf116daf83d3792a8e4a87e9cdbf81c6d461e2376dba87eb8a97a7f113" Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.536707 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-hgjw2" Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.581522 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.585685 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-hgjw2"] Jan 23 07:00:55 crc kubenswrapper[5102]: I0123 07:00:55.611927 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25efae93-08ee-4c4a-88db-3faa88559398" path="/var/lib/kubelet/pods/25efae93-08ee-4c4a-88db-3faa88559398/volumes" Jan 23 07:01:16 crc kubenswrapper[5102]: I0123 07:01:16.769149 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:01:16 crc kubenswrapper[5102]: I0123 07:01:16.770386 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:01:16 crc kubenswrapper[5102]: I0123 07:01:16.770488 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:01:16 crc kubenswrapper[5102]: I0123 07:01:16.772413 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:01:16 crc kubenswrapper[5102]: I0123 07:01:16.772716 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c" gracePeriod=600 Jan 23 07:01:17 crc kubenswrapper[5102]: I0123 07:01:17.685005 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c" exitCode=0 Jan 23 07:01:17 crc kubenswrapper[5102]: I0123 07:01:17.685077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c"} Jan 23 07:01:17 crc kubenswrapper[5102]: I0123 07:01:17.685563 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254"} Jan 23 07:01:17 crc kubenswrapper[5102]: I0123 07:01:17.685596 5102 scope.go:117] "RemoveContainer" 
containerID="8293777796bbdf80b69f11dcdc36caba47dfc9a8e252e1b64a0da8af9680218b" Jan 23 07:03:46 crc kubenswrapper[5102]: I0123 07:03:46.768240 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:03:46 crc kubenswrapper[5102]: I0123 07:03:46.768902 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:04:16 crc kubenswrapper[5102]: I0123 07:04:16.768482 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:04:16 crc kubenswrapper[5102]: I0123 07:04:16.769192 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:04:46 crc kubenswrapper[5102]: I0123 07:04:46.768629 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:04:46 crc kubenswrapper[5102]: I0123 07:04:46.769280 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:04:46 crc kubenswrapper[5102]: I0123 07:04:46.769364 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:04:46 crc kubenswrapper[5102]: I0123 07:04:46.770379 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:04:46 crc kubenswrapper[5102]: I0123 07:04:46.770477 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254" gracePeriod=600 Jan 23 07:04:47 crc kubenswrapper[5102]: I0123 07:04:47.220087 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" 
containerID="8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254" exitCode=0 Jan 23 07:04:47 crc kubenswrapper[5102]: I0123 07:04:47.220152 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254"} Jan 23 07:04:47 crc kubenswrapper[5102]: I0123 07:04:47.220514 5102 scope.go:117] "RemoveContainer" containerID="a4b5a462104bcad6d87d3ad5d1a4e712c5682a2573ed394d5ddf17fee434ba9c" Jan 23 07:04:48 crc kubenswrapper[5102]: I0123 07:04:48.228611 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3"} Jan 23 07:05:09 crc kubenswrapper[5102]: I0123 07:05:09.885468 5102 scope.go:117] "RemoveContainer" containerID="a18e019c4fc8e7ba391f1d1048ad1d0684b9d6acc91954457f54a8200f09685b" Jan 23 07:06:53 crc kubenswrapper[5102]: I0123 07:06:53.454518 5102 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Jan 23 07:07:16 crc kubenswrapper[5102]: I0123 07:07:16.768651 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:07:16 crc kubenswrapper[5102]: I0123 07:07:16.769261 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.932141 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cgkqt"] Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933277 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-controller" containerID="cri-o://f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933414 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933461 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-node" containerID="cri-o://37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933528 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" 
podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-acl-logging" containerID="cri-o://2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933534 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="northd" containerID="cri-o://22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933910 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="sbdb" containerID="cri-o://3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.933354 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="nbdb" containerID="cri-o://7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d" gracePeriod=30 Jan 23 07:07:20 crc kubenswrapper[5102]: I0123 07:07:20.992387 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" containerID="cri-o://c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51" gracePeriod=30 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.249416 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovnkube-controller/3.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.252244 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-acl-logging/0.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.252913 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-controller/0.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253314 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51" exitCode=0 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253334 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860" exitCode=0 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253341 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d" exitCode=0 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253348 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5" exitCode=0 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253357 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" 
containerID="37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5" exitCode=0 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253365 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7" exitCode=143 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253375 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442" exitCode=143 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253427 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253504 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253527 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253570 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253589 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253607 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253625 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.253591 5102 scope.go:117] "RemoveContainer" containerID="d806c050c6e89333fd4729b4a1262fc8820e4263abdf480d4d6fe232e98b8741" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.255524 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/2.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.256250 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/1.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.256317 5102 generic.go:334] "Generic (PLEG): container finished" 
podID="c1446a26-ae38-40f3-a313-8604f5e98285" containerID="40aff2867ef29c03741072586843e76d98d67bbbacaf071da7e59ad200163102" exitCode=2 Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.256360 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerDied","Data":"40aff2867ef29c03741072586843e76d98d67bbbacaf071da7e59ad200163102"} Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.258771 5102 scope.go:117] "RemoveContainer" containerID="40aff2867ef29c03741072586843e76d98d67bbbacaf071da7e59ad200163102" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.281953 5102 scope.go:117] "RemoveContainer" containerID="0208d2d8bf5494738fecb4c1127d25ccc7318c055e32d29ae8a493a1b34ee132" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.292639 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-acl-logging/0.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.293054 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-controller/0.log" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.293470 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.355494 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-tv7tk"] Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356091 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356118 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356139 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kubecfg-setup" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356148 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kubecfg-setup" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356164 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356173 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356182 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-acl-logging" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356189 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-acl-logging" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356199 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-node" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356206 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-node" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356219 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="25efae93-08ee-4c4a-88db-3faa88559398" containerName="registry" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356228 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="25efae93-08ee-4c4a-88db-3faa88559398" containerName="registry" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356241 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="sbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356249 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="sbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356262 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356270 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356279 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="nbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356287 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="nbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356297 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356304 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356313 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356321 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356337 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="northd" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356344 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="northd" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356467 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356480 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356493 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="sbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356514 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="25efae93-08ee-4c4a-88db-3faa88559398" containerName="registry" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356528 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356541 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="nbdb" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356563 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-node" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356574 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="kube-rbac-proxy-ovn-metrics" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356582 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="northd" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356592 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356599 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovn-acl-logging" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356735 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356746 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: E0123 07:07:21.356765 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356773 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356896 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.356912 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerName="ovnkube-controller" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.359346 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.430850 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.430905 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.430929 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.430964 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.430993 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431008 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431028 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431061 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431078 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431097 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sjfbl\" (UniqueName: 
\"kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431112 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431131 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431154 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431175 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431201 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431220 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431242 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431259 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431279 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431292 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket\") pod \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\" (UID: \"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da\") " Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431722 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash" (OuterVolumeSpecName: "host-slash") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431797 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431839 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.431777 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432001 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432080 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432148 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-run-netns". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432174 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432210 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432227 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432239 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log" (OuterVolumeSpecName: "node-log") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432257 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432509 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432604 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "ovnkube-config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432906 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.432937 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.433076 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket" (OuterVolumeSpecName: "log-socket") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "log-socket". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.437721 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.437875 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl" (OuterVolumeSpecName: "kube-api-access-sjfbl") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "kube-api-access-sjfbl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.445058 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" (UID: "9b926ddd-8c4e-41b8-87f9-aa35fb7af1da"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533030 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-node-log\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533321 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533392 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-netns\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533462 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-script-lib\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533530 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533629 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-netd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533716 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9574dc74-be49-42ff-8d51-0656c40a26fd-ovn-node-metrics-cert\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533793 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-bin\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533865 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: 
\"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-env-overrides\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.533941 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-kubelet\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534003 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-config\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534074 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-etc-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534131 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534204 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-ovn\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534277 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-slash\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534347 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fplhw\" (UniqueName: \"kubernetes.io/projected/9574dc74-be49-42ff-8d51-0656c40a26fd-kube-api-access-fplhw\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534416 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-systemd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534493 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-log-socket\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534698 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-systemd-units\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.534810 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-var-lib-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535015 5102 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535048 5102 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-netns\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535070 5102 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535089 5102 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-env-overrides\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535106 5102 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535127 5102 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535148 5102 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-openvswitch\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535167 5102 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-systemd\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535186 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sjfbl\" (UniqueName: 
\"kubernetes.io/projected/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-kube-api-access-sjfbl\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535207 5102 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-slash\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535226 5102 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-systemd-units\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535243 5102 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-kubelet\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535260 5102 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535278 5102 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-bin\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535299 5102 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-node-log\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535324 5102 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535348 5102 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535373 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535395 5102 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-log-socket\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.535420 5102 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da-host-cni-netd\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636066 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-env-overrides\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636110 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-kubelet\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636130 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-config\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636153 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-etc-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636168 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636189 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-ovn\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636216 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-slash\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636232 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fplhw\" (UniqueName: \"kubernetes.io/projected/9574dc74-be49-42ff-8d51-0656c40a26fd-kube-api-access-fplhw\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636247 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-systemd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636321 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-log-socket\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636338 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: 
\"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-systemd-units\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636353 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-var-lib-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636377 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-node-log\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636394 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636408 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-netns\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636423 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-script-lib\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636441 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636460 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-netd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636477 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9574dc74-be49-42ff-8d51-0656c40a26fd-ovn-node-metrics-cert\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636496 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-bin\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.636572 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-bin\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637129 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-env-overrides\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637161 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-kubelet\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637542 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-config\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637594 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-etc-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637616 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637637 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-ovn\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637657 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-slash\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637911 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-run-systemd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637935 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-log-socket\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.637956 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-systemd-units\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638002 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-var-lib-openvswitch\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638023 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-node-log\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638043 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638063 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-run-netns\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638475 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/9574dc74-be49-42ff-8d51-0656c40a26fd-ovnkube-script-lib\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638506 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.638528 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/9574dc74-be49-42ff-8d51-0656c40a26fd-host-cni-netd\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 
07:07:21.645487 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/9574dc74-be49-42ff-8d51-0656c40a26fd-ovn-node-metrics-cert\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.654850 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fplhw\" (UniqueName: \"kubernetes.io/projected/9574dc74-be49-42ff-8d51-0656c40a26fd-kube-api-access-fplhw\") pod \"ovnkube-node-tv7tk\" (UID: \"9574dc74-be49-42ff-8d51-0656c40a26fd\") " pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: I0123 07:07:21.677504 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:21 crc kubenswrapper[5102]: W0123 07:07:21.702684 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9574dc74_be49_42ff_8d51_0656c40a26fd.slice/crio-3250d724957fb9acc722827099b06f629cc3581a047af0a3b5575fa96be4e40a WatchSource:0}: Error finding container 3250d724957fb9acc722827099b06f629cc3581a047af0a3b5575fa96be4e40a: Status 404 returned error can't find the container with id 3250d724957fb9acc722827099b06f629cc3581a047af0a3b5575fa96be4e40a Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.269426 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-acl-logging/0.log" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.270360 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-cgkqt_9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/ovn-controller/0.log" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.270853 5102 generic.go:334] "Generic (PLEG): container finished" podID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" containerID="22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba" exitCode=0 Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.270909 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba"} Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.270936 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" event={"ID":"9b926ddd-8c4e-41b8-87f9-aa35fb7af1da","Type":"ContainerDied","Data":"d5fa8289023fded4f70c31a498908d8405775caf992283a000ad815b29bbe14f"} Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.270956 5102 scope.go:117] "RemoveContainer" containerID="c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.271093 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-cgkqt" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.278248 5102 generic.go:334] "Generic (PLEG): container finished" podID="9574dc74-be49-42ff-8d51-0656c40a26fd" containerID="a0316fb574da13e5379a0b40e9b9a4251f545dace089d8358d50e52112ccdb01" exitCode=0 Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.278384 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerDied","Data":"a0316fb574da13e5379a0b40e9b9a4251f545dace089d8358d50e52112ccdb01"} Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.278460 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"3250d724957fb9acc722827099b06f629cc3581a047af0a3b5575fa96be4e40a"} Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.282747 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-5vv4l_c1446a26-ae38-40f3-a313-8604f5e98285/kube-multus/2.log" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.282840 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-5vv4l" event={"ID":"c1446a26-ae38-40f3-a313-8604f5e98285","Type":"ContainerStarted","Data":"8c9797edcb091e94c647207a82000567ee2318ec18901d51f9a6168324a580ce"} Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.314602 5102 scope.go:117] "RemoveContainer" containerID="3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.348634 5102 scope.go:117] "RemoveContainer" containerID="7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.378758 5102 scope.go:117] "RemoveContainer" containerID="22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.399131 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cgkqt"] Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.405036 5102 scope.go:117] "RemoveContainer" containerID="9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.409518 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-cgkqt"] Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.422531 5102 scope.go:117] "RemoveContainer" containerID="37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.437984 5102 scope.go:117] "RemoveContainer" containerID="2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.454484 5102 scope.go:117] "RemoveContainer" containerID="f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.473507 5102 scope.go:117] "RemoveContainer" containerID="5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.496377 5102 scope.go:117] "RemoveContainer" containerID="c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.499266 5102 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51\": container with ID starting with c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51 not found: ID does not exist" containerID="c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.499312 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51"} err="failed to get container status \"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51\": rpc error: code = NotFound desc = could not find container \"c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51\": container with ID starting with c115e5f19a4990d1d2603606295b38ee2c1a00300bccf797e733cd58a506bf51 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.499343 5102 scope.go:117] "RemoveContainer" containerID="3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.499774 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\": container with ID starting with 3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860 not found: ID does not exist" containerID="3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.499825 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860"} err="failed to get container status \"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\": rpc error: code = NotFound desc = could not find container \"3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860\": container with ID starting with 3dc572d6448b1494b133e5592c8cf3bd968019fc6be29d6c3b9c400d91e7d860 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.499848 5102 scope.go:117] "RemoveContainer" containerID="7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.500157 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\": container with ID starting with 7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d not found: ID does not exist" containerID="7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500196 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d"} err="failed to get container status \"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\": rpc error: code = NotFound desc = could not find container \"7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d\": container with ID starting with 7a4aba58d1ec382dafb160b2d7c265d7cfd7383033697f1960ade2e1148f977d not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500210 5102 scope.go:117] "RemoveContainer" 
containerID="22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.500614 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\": container with ID starting with 22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba not found: ID does not exist" containerID="22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500653 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba"} err="failed to get container status \"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\": rpc error: code = NotFound desc = could not find container \"22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba\": container with ID starting with 22536d05502b0ffba38a9d435063191a0046d71ef97fbf9d020297352e1a46ba not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500665 5102 scope.go:117] "RemoveContainer" containerID="9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.500926 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\": container with ID starting with 9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5 not found: ID does not exist" containerID="9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500967 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5"} err="failed to get container status \"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\": rpc error: code = NotFound desc = could not find container \"9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5\": container with ID starting with 9fa57673b55d9494d8dbc4cf942b29d9e2ee27482448cf05a99faafc6e898cb5 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.500980 5102 scope.go:117] "RemoveContainer" containerID="37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.501246 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\": container with ID starting with 37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5 not found: ID does not exist" containerID="37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.501265 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5"} err="failed to get container status \"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\": rpc error: code = NotFound desc = could not find container \"37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5\": container with ID starting with 
37f8124a438443b497da89eb6195ef7f4b12945beaebedecd982aa535806ebf5 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.501299 5102 scope.go:117] "RemoveContainer" containerID="2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.501591 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\": container with ID starting with 2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7 not found: ID does not exist" containerID="2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.501612 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7"} err="failed to get container status \"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\": rpc error: code = NotFound desc = could not find container \"2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7\": container with ID starting with 2434427e2f8ac4ad4cbf6821fce369108b60cf3a011c1d64636c8b1663f596c7 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.501624 5102 scope.go:117] "RemoveContainer" containerID="f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.501996 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\": container with ID starting with f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442 not found: ID does not exist" containerID="f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.502022 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442"} err="failed to get container status \"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\": rpc error: code = NotFound desc = could not find container \"f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442\": container with ID starting with f2b93d4b4e94bfab485ede4be5ccdcdf9f10e1d2c9fd3c53a3d79693c2c83442 not found: ID does not exist" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.502044 5102 scope.go:117] "RemoveContainer" containerID="5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733" Jan 23 07:07:22 crc kubenswrapper[5102]: E0123 07:07:22.502298 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\": container with ID starting with 5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733 not found: ID does not exist" containerID="5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733" Jan 23 07:07:22 crc kubenswrapper[5102]: I0123 07:07:22.502354 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733"} err="failed to get container status \"5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\": rpc 
error: code = NotFound desc = could not find container \"5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733\": container with ID starting with 5cb06106daa52747bf578234a689567ac9585ac758c6e1d89c7b087addf53733 not found: ID does not exist" Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.298845 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"5d0d331813361eb02c94ca78adfbaac6d5b63c40307af8a740c0b7cb74c8f234"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.299229 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"26a513db94043d22c6b2a4d11d1ace5012416e9caac89cbcb97cb85bd076b689"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.299255 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"5fa7b85fb66a081ce469c0a34d885aaf59f1769be166cf6a30fa89a923d1770e"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.299273 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"59d61f9545ead640be2e9bd2ca9698924efaaace5d575e7b1c386906df84fb3e"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.299290 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"b93b24cd8c9374658660143a8518f6a00a946d8e1936485cc8f9c6bfa487492e"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.299307 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"3cda5203bf3ce8097676947ed87615b5b1c63d0f9433a57b03c27e84f05e5fbf"} Jan 23 07:07:23 crc kubenswrapper[5102]: I0123 07:07:23.611975 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9b926ddd-8c4e-41b8-87f9-aa35fb7af1da" path="/var/lib/kubelet/pods/9b926ddd-8c4e-41b8-87f9-aa35fb7af1da/volumes" Jan 23 07:07:26 crc kubenswrapper[5102]: I0123 07:07:26.333385 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"3d08ad5c06f8e4b615a521016bfc818943ddd00d3306c4c42a5642e3e56f9d45"} Jan 23 07:07:29 crc kubenswrapper[5102]: I0123 07:07:29.366305 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" event={"ID":"9574dc74-be49-42ff-8d51-0656c40a26fd","Type":"ContainerStarted","Data":"d30f05cb8b66d7fbca74af4323bdef1e5a3cf16bda8a7712f7ba96b415433434"} Jan 23 07:07:29 crc kubenswrapper[5102]: I0123 07:07:29.367093 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:29 crc kubenswrapper[5102]: I0123 07:07:29.367128 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:29 crc kubenswrapper[5102]: I0123 07:07:29.397908 5102 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" podStartSLOduration=8.397876236 podStartE2EDuration="8.397876236s" podCreationTimestamp="2026-01-23 07:07:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:07:29.395119489 +0000 UTC m=+800.215468504" watchObservedRunningTime="2026-01-23 07:07:29.397876236 +0000 UTC m=+800.218225251" Jan 23 07:07:29 crc kubenswrapper[5102]: I0123 07:07:29.412242 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:30 crc kubenswrapper[5102]: I0123 07:07:30.371607 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:30 crc kubenswrapper[5102]: I0123 07:07:30.402579 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.212569 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["crc-storage/crc-storage-crc-x4qvf"] Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.214412 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.216945 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"kube-root-ca.crt" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.217271 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"openshift-service-ca.crt" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.217785 5102 reflector.go:368] Caches populated for *v1.Secret from object-"crc-storage"/"crc-storage-dockercfg-cnq5x" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.220863 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"crc-storage"/"crc-storage" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.229976 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x4qvf"] Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.314290 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72kpg\" (UniqueName: \"kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.314383 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.314412 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.416160 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72kpg\" (UniqueName: 
\"kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.416262 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.416300 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.416733 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.417502 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.456358 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72kpg\" (UniqueName: \"kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg\") pod \"crc-storage-crc-x4qvf\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.571886 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.839022 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["crc-storage/crc-storage-crc-x4qvf"] Jan 23 07:07:33 crc kubenswrapper[5102]: W0123 07:07:33.842027 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode6757421_bf79_47b9_92a2_ed0f2d7f0395.slice/crio-20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926 WatchSource:0}: Error finding container 20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926: Status 404 returned error can't find the container with id 20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926 Jan 23 07:07:33 crc kubenswrapper[5102]: I0123 07:07:33.845453 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 07:07:34 crc kubenswrapper[5102]: I0123 07:07:34.399260 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x4qvf" event={"ID":"e6757421-bf79-47b9-92a2-ed0f2d7f0395","Type":"ContainerStarted","Data":"20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926"} Jan 23 07:07:35 crc kubenswrapper[5102]: I0123 07:07:35.410384 5102 generic.go:334] "Generic (PLEG): container finished" podID="e6757421-bf79-47b9-92a2-ed0f2d7f0395" containerID="8ddf3df60cffabd92a82d0980e9932950d4ca2f1043f72770e1e5333e608c4e7" exitCode=0 Jan 23 07:07:35 crc kubenswrapper[5102]: I0123 07:07:35.410471 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x4qvf" event={"ID":"e6757421-bf79-47b9-92a2-ed0f2d7f0395","Type":"ContainerDied","Data":"8ddf3df60cffabd92a82d0980e9932950d4ca2f1043f72770e1e5333e608c4e7"} Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.736821 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.763953 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-72kpg\" (UniqueName: \"kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg\") pod \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.764068 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt\") pod \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.764226 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt" (OuterVolumeSpecName: "node-mnt") pod "e6757421-bf79-47b9-92a2-ed0f2d7f0395" (UID: "e6757421-bf79-47b9-92a2-ed0f2d7f0395"). InnerVolumeSpecName "node-mnt". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.764424 5102 reconciler_common.go:293] "Volume detached for volume \"node-mnt\" (UniqueName: \"kubernetes.io/host-path/e6757421-bf79-47b9-92a2-ed0f2d7f0395-node-mnt\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.773807 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg" (OuterVolumeSpecName: "kube-api-access-72kpg") pod "e6757421-bf79-47b9-92a2-ed0f2d7f0395" (UID: "e6757421-bf79-47b9-92a2-ed0f2d7f0395"). InnerVolumeSpecName "kube-api-access-72kpg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.864767 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage\") pod \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\" (UID: \"e6757421-bf79-47b9-92a2-ed0f2d7f0395\") " Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.865115 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-72kpg\" (UniqueName: \"kubernetes.io/projected/e6757421-bf79-47b9-92a2-ed0f2d7f0395-kube-api-access-72kpg\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.882943 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage" (OuterVolumeSpecName: "crc-storage") pod "e6757421-bf79-47b9-92a2-ed0f2d7f0395" (UID: "e6757421-bf79-47b9-92a2-ed0f2d7f0395"). InnerVolumeSpecName "crc-storage". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:07:36 crc kubenswrapper[5102]: I0123 07:07:36.966229 5102 reconciler_common.go:293] "Volume detached for volume \"crc-storage\" (UniqueName: \"kubernetes.io/configmap/e6757421-bf79-47b9-92a2-ed0f2d7f0395-crc-storage\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:37 crc kubenswrapper[5102]: I0123 07:07:37.426024 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="crc-storage/crc-storage-crc-x4qvf" event={"ID":"e6757421-bf79-47b9-92a2-ed0f2d7f0395","Type":"ContainerDied","Data":"20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926"} Jan 23 07:07:37 crc kubenswrapper[5102]: I0123 07:07:37.426077 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="20503f5efe14c0f169d5dbc31c10a1eedbf78b2c00a417f348e822e4ca61c926" Jan 23 07:07:37 crc kubenswrapper[5102]: I0123 07:07:37.426127 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="crc-storage/crc-storage-crc-x4qvf" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.289241 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w"] Jan 23 07:07:45 crc kubenswrapper[5102]: E0123 07:07:45.290150 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e6757421-bf79-47b9-92a2-ed0f2d7f0395" containerName="storage" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.290170 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e6757421-bf79-47b9-92a2-ed0f2d7f0395" containerName="storage" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.290344 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e6757421-bf79-47b9-92a2-ed0f2d7f0395" containerName="storage" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.291501 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.293802 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.303229 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w"] Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.317763 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.317847 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2sgwc\" (UniqueName: \"kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.317923 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.418861 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2sgwc\" (UniqueName: \"kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.418956 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"util\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.418995 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.419605 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.420748 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.439522 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2sgwc\" (UniqueName: \"kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc\") pod \"53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:45 crc kubenswrapper[5102]: I0123 07:07:45.609431 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:46 crc kubenswrapper[5102]: I0123 07:07:46.059361 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w"] Jan 23 07:07:46 crc kubenswrapper[5102]: W0123 07:07:46.065479 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddbbb0c70_8da6_4acc_a92d_f19a08611e94.slice/crio-0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b WatchSource:0}: Error finding container 0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b: Status 404 returned error can't find the container with id 0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b Jan 23 07:07:46 crc kubenswrapper[5102]: I0123 07:07:46.484647 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerStarted","Data":"4ec388b9e27ef29952a0e1fb8295163e44d6ad28e673c2d5880181efde7aa4f5"} Jan 23 07:07:46 crc kubenswrapper[5102]: I0123 07:07:46.484727 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerStarted","Data":"0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b"} Jan 23 07:07:46 crc kubenswrapper[5102]: I0123 07:07:46.768911 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:07:46 crc kubenswrapper[5102]: I0123 07:07:46.769410 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.045384 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.048649 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.055081 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.144042 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-26v2l\" (UniqueName: \"kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.144218 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.144350 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.246440 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-26v2l\" (UniqueName: \"kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.246584 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.246632 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.247487 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.247633 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.271040 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-26v2l\" (UniqueName: \"kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l\") pod \"redhat-operators-pb5mz\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.413297 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.491239 5102 generic.go:334] "Generic (PLEG): container finished" podID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerID="4ec388b9e27ef29952a0e1fb8295163e44d6ad28e673c2d5880181efde7aa4f5" exitCode=0 Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.491287 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerDied","Data":"4ec388b9e27ef29952a0e1fb8295163e44d6ad28e673c2d5880181efde7aa4f5"} Jan 23 07:07:47 crc kubenswrapper[5102]: I0123 07:07:47.642363 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:07:47 crc kubenswrapper[5102]: W0123 07:07:47.649026 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod193770b3_f315_42b1_aace_f67be7f43e88.slice/crio-2817effdf1fd2015fb17c4b11f3f2962740ddee973b913c1526abea7223fd3f1 WatchSource:0}: Error finding container 2817effdf1fd2015fb17c4b11f3f2962740ddee973b913c1526abea7223fd3f1: Status 404 returned error can't find the container with id 2817effdf1fd2015fb17c4b11f3f2962740ddee973b913c1526abea7223fd3f1 Jan 23 07:07:48 crc kubenswrapper[5102]: I0123 07:07:48.497768 5102 generic.go:334] "Generic (PLEG): container finished" podID="193770b3-f315-42b1-aace-f67be7f43e88" containerID="4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3" exitCode=0 Jan 23 07:07:48 crc kubenswrapper[5102]: I0123 07:07:48.498123 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerDied","Data":"4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3"} Jan 23 07:07:48 crc kubenswrapper[5102]: I0123 07:07:48.498160 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerStarted","Data":"2817effdf1fd2015fb17c4b11f3f2962740ddee973b913c1526abea7223fd3f1"} Jan 23 07:07:49 crc kubenswrapper[5102]: I0123 07:07:49.511006 5102 generic.go:334] "Generic (PLEG): container finished" podID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerID="f069184b2a0da10d1bf8a551c1fa218d7af313a29666f576cb909958c6c69126" exitCode=0 Jan 23 07:07:49 crc kubenswrapper[5102]: I0123 07:07:49.511110 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerDied","Data":"f069184b2a0da10d1bf8a551c1fa218d7af313a29666f576cb909958c6c69126"} Jan 23 07:07:49 crc kubenswrapper[5102]: I0123 07:07:49.516664 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" 
event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerStarted","Data":"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60"} Jan 23 07:07:50 crc kubenswrapper[5102]: I0123 07:07:50.527503 5102 generic.go:334] "Generic (PLEG): container finished" podID="193770b3-f315-42b1-aace-f67be7f43e88" containerID="b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60" exitCode=0 Jan 23 07:07:50 crc kubenswrapper[5102]: I0123 07:07:50.527663 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerDied","Data":"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60"} Jan 23 07:07:50 crc kubenswrapper[5102]: I0123 07:07:50.533791 5102 generic.go:334] "Generic (PLEG): container finished" podID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerID="20a4b0e24e442172b13bd7c679f3b5a3e7446abe8d39d38431f4b5bfdd0dffba" exitCode=0 Jan 23 07:07:50 crc kubenswrapper[5102]: I0123 07:07:50.533855 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerDied","Data":"20a4b0e24e442172b13bd7c679f3b5a3e7446abe8d39d38431f4b5bfdd0dffba"} Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.542520 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerStarted","Data":"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880"} Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.580401 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pb5mz" podStartSLOduration=2.181899425 podStartE2EDuration="4.580379792s" podCreationTimestamp="2026-01-23 07:07:47 +0000 UTC" firstStartedPulling="2026-01-23 07:07:48.559788996 +0000 UTC m=+819.380137971" lastFinishedPulling="2026-01-23 07:07:50.958269363 +0000 UTC m=+821.778618338" observedRunningTime="2026-01-23 07:07:51.564806258 +0000 UTC m=+822.385155243" watchObservedRunningTime="2026-01-23 07:07:51.580379792 +0000 UTC m=+822.400728777" Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.710861 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-tv7tk" Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.844887 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.905958 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2sgwc\" (UniqueName: \"kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc\") pod \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.906028 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util\") pod \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.906061 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle\") pod \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\" (UID: \"dbbb0c70-8da6-4acc-a92d-f19a08611e94\") " Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.906784 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle" (OuterVolumeSpecName: "bundle") pod "dbbb0c70-8da6-4acc-a92d-f19a08611e94" (UID: "dbbb0c70-8da6-4acc-a92d-f19a08611e94"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.912122 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc" (OuterVolumeSpecName: "kube-api-access-2sgwc") pod "dbbb0c70-8da6-4acc-a92d-f19a08611e94" (UID: "dbbb0c70-8da6-4acc-a92d-f19a08611e94"). InnerVolumeSpecName "kube-api-access-2sgwc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:07:51 crc kubenswrapper[5102]: I0123 07:07:51.934784 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util" (OuterVolumeSpecName: "util") pod "dbbb0c70-8da6-4acc-a92d-f19a08611e94" (UID: "dbbb0c70-8da6-4acc-a92d-f19a08611e94"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.007760 5102 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-util\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.007837 5102 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dbbb0c70-8da6-4acc-a92d-f19a08611e94-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.007850 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2sgwc\" (UniqueName: \"kubernetes.io/projected/dbbb0c70-8da6-4acc-a92d-f19a08611e94-kube-api-access-2sgwc\") on node \"crc\" DevicePath \"\"" Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.553694 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.556625 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w" event={"ID":"dbbb0c70-8da6-4acc-a92d-f19a08611e94","Type":"ContainerDied","Data":"0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b"} Jan 23 07:07:52 crc kubenswrapper[5102]: I0123 07:07:52.556668 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0878ffe7e5361b6bc40b11a9c474f214e6104a5e6d346c71718799bd199fc62b" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.765741 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9pqnm"] Jan 23 07:07:55 crc kubenswrapper[5102]: E0123 07:07:55.766380 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="pull" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.766404 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="pull" Jan 23 07:07:55 crc kubenswrapper[5102]: E0123 07:07:55.766432 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="extract" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.766445 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="extract" Jan 23 07:07:55 crc kubenswrapper[5102]: E0123 07:07:55.766465 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="util" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.766478 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="util" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.766700 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="dbbb0c70-8da6-4acc-a92d-f19a08611e94" containerName="extract" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.767286 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.769018 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"openshift-service-ca.crt" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.769286 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-operator-dockercfg-2zh29" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.769512 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"kube-root-ca.crt" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.776287 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9pqnm"] Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.860066 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rwwd2\" (UniqueName: \"kubernetes.io/projected/cfc974f5-adaa-43f2-bd3f-d0cf669315f2-kube-api-access-rwwd2\") pod \"nmstate-operator-646758c888-9pqnm\" (UID: \"cfc974f5-adaa-43f2-bd3f-d0cf669315f2\") " pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.961689 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rwwd2\" (UniqueName: \"kubernetes.io/projected/cfc974f5-adaa-43f2-bd3f-d0cf669315f2-kube-api-access-rwwd2\") pod \"nmstate-operator-646758c888-9pqnm\" (UID: \"cfc974f5-adaa-43f2-bd3f-d0cf669315f2\") " pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" Jan 23 07:07:55 crc kubenswrapper[5102]: I0123 07:07:55.998507 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rwwd2\" (UniqueName: \"kubernetes.io/projected/cfc974f5-adaa-43f2-bd3f-d0cf669315f2-kube-api-access-rwwd2\") pod \"nmstate-operator-646758c888-9pqnm\" (UID: \"cfc974f5-adaa-43f2-bd3f-d0cf669315f2\") " pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" Jan 23 07:07:56 crc kubenswrapper[5102]: I0123 07:07:56.083392 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" Jan 23 07:07:56 crc kubenswrapper[5102]: I0123 07:07:56.284951 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-operator-646758c888-9pqnm"] Jan 23 07:07:56 crc kubenswrapper[5102]: W0123 07:07:56.296729 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcfc974f5_adaa_43f2_bd3f_d0cf669315f2.slice/crio-0e981b693dd666c2e9f72b129f2c8bab34055eb26c90d86eb9fe6a0e346d8bf6 WatchSource:0}: Error finding container 0e981b693dd666c2e9f72b129f2c8bab34055eb26c90d86eb9fe6a0e346d8bf6: Status 404 returned error can't find the container with id 0e981b693dd666c2e9f72b129f2c8bab34055eb26c90d86eb9fe6a0e346d8bf6 Jan 23 07:07:56 crc kubenswrapper[5102]: I0123 07:07:56.575646 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" event={"ID":"cfc974f5-adaa-43f2-bd3f-d0cf669315f2","Type":"ContainerStarted","Data":"0e981b693dd666c2e9f72b129f2c8bab34055eb26c90d86eb9fe6a0e346d8bf6"} Jan 23 07:07:57 crc kubenswrapper[5102]: I0123 07:07:57.414372 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:57 crc kubenswrapper[5102]: I0123 07:07:57.414790 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:07:58 crc kubenswrapper[5102]: I0123 07:07:58.480251 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pb5mz" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="registry-server" probeResult="failure" output=< Jan 23 07:07:58 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 07:07:58 crc kubenswrapper[5102]: > Jan 23 07:08:00 crc kubenswrapper[5102]: I0123 07:08:00.605269 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" event={"ID":"cfc974f5-adaa-43f2-bd3f-d0cf669315f2","Type":"ContainerStarted","Data":"007256cea6d5165cf9af056f386e1ecc9d39733888bd63069a04868579d0296e"} Jan 23 07:08:00 crc kubenswrapper[5102]: I0123 07:08:00.636801 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-operator-646758c888-9pqnm" podStartSLOduration=2.40610137 podStartE2EDuration="5.636775163s" podCreationTimestamp="2026-01-23 07:07:55 +0000 UTC" firstStartedPulling="2026-01-23 07:07:56.299240523 +0000 UTC m=+827.119589498" lastFinishedPulling="2026-01-23 07:07:59.529914316 +0000 UTC m=+830.350263291" observedRunningTime="2026-01-23 07:08:00.632648902 +0000 UTC m=+831.452997977" watchObservedRunningTime="2026-01-23 07:08:00.636775163 +0000 UTC m=+831.457124168" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.454608 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-5zbpg"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.455784 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.457858 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"nmstate-handler-dockercfg-b6zz8" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.495607 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts9z5\" (UniqueName: \"kubernetes.io/projected/b517d1cc-d6a8-4857-a414-25efdbfc523f-kube-api-access-ts9z5\") pod \"nmstate-metrics-54757c584b-5zbpg\" (UID: \"b517d1cc-d6a8-4857-a414-25efdbfc523f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.505490 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-handler-zj4xz"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.506245 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.506694 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.507058 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.509659 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"openshift-nmstate-webhook" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.514030 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.520193 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-5zbpg"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.598689 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts9z5\" (UniqueName: \"kubernetes.io/projected/b517d1cc-d6a8-4857-a414-25efdbfc523f-kube-api-access-ts9z5\") pod \"nmstate-metrics-54757c584b-5zbpg\" (UID: \"b517d1cc-d6a8-4857-a414-25efdbfc523f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.598773 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-dbus-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.599319 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/26613359-769b-4d8c-846b-aafc773eec15-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: \"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.599371 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mhqjj\" (UniqueName: \"kubernetes.io/projected/26613359-769b-4d8c-846b-aafc773eec15-kube-api-access-mhqjj\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: 
\"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.599562 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-ovs-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.599636 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z5rgp\" (UniqueName: \"kubernetes.io/projected/d3b2043c-f890-4def-9d04-857501627d4d-kube-api-access-z5rgp\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.599660 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-nmstate-lock\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.611958 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.612872 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.615810 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-nmstate"/"nginx-conf" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.616261 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"plugin-serving-cert" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.616591 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-nmstate"/"default-dockercfg-q4jhn" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.621919 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.627578 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts9z5\" (UniqueName: \"kubernetes.io/projected/b517d1cc-d6a8-4857-a414-25efdbfc523f-kube-api-access-ts9z5\") pod \"nmstate-metrics-54757c584b-5zbpg\" (UID: \"b517d1cc-d6a8-4857-a414-25efdbfc523f\") " pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700451 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/26613359-769b-4d8c-846b-aafc773eec15-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: \"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700492 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mhqjj\" (UniqueName: \"kubernetes.io/projected/26613359-769b-4d8c-846b-aafc773eec15-kube-api-access-mhqjj\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: 
\"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700620 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-socket\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-ovs-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700663 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z5rgp\" (UniqueName: \"kubernetes.io/projected/d3b2043c-f890-4def-9d04-857501627d4d-kube-api-access-z5rgp\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700687 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-nmstate-lock\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700727 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700744 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fhzm5\" (UniqueName: \"kubernetes.io/projected/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-kube-api-access-fhzm5\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700762 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-dbus-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.700802 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.701442 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nmstate-lock\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-nmstate-lock\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.701634 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-socket\" (UniqueName: 
\"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-ovs-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.701688 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dbus-socket\" (UniqueName: \"kubernetes.io/host-path/d3b2043c-f890-4def-9d04-857501627d4d-dbus-socket\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.704129 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tls-key-pair\" (UniqueName: \"kubernetes.io/secret/26613359-769b-4d8c-846b-aafc773eec15-tls-key-pair\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: \"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.728338 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z5rgp\" (UniqueName: \"kubernetes.io/projected/d3b2043c-f890-4def-9d04-857501627d4d-kube-api-access-z5rgp\") pod \"nmstate-handler-zj4xz\" (UID: \"d3b2043c-f890-4def-9d04-857501627d4d\") " pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.731893 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mhqjj\" (UniqueName: \"kubernetes.io/projected/26613359-769b-4d8c-846b-aafc773eec15-kube-api-access-mhqjj\") pod \"nmstate-webhook-8474b5b9d8-fhrhh\" (UID: \"26613359-769b-4d8c-846b-aafc773eec15\") " pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.778239 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.802285 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.802331 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fhzm5\" (UniqueName: \"kubernetes.io/projected/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-kube-api-access-fhzm5\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.802379 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.803873 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-nginx-conf\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.812713 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-77f454d56c-cxqfd"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.813564 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.814477 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugin-serving-cert\" (UniqueName: \"kubernetes.io/secret/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-plugin-serving-cert\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.828402 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-77f454d56c-cxqfd"] Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.829117 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.832332 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fhzm5\" (UniqueName: \"kubernetes.io/projected/0591e1d6-98eb-4fbe-b102-d420dfb1dd4a-kube-api-access-fhzm5\") pod \"nmstate-console-plugin-7754f76f8b-v7xk7\" (UID: \"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a\") " pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.837557 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903435 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-service-ca\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903488 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-trusted-ca-bundle\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903624 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-oauth-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903669 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903720 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-console-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903886 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qrm57\" (UniqueName: \"kubernetes.io/projected/f909c312-e835-429a-9f64-9a9f308c08a3-kube-api-access-qrm57\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.903905 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-oauth-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:05 crc kubenswrapper[5102]: I0123 07:08:05.958569 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004715 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-trusted-ca-bundle\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004765 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-oauth-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004790 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004811 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-console-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004868 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qrm57\" (UniqueName: \"kubernetes.io/projected/f909c312-e835-429a-9f64-9a9f308c08a3-kube-api-access-qrm57\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004885 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-oauth-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.004935 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-service-ca\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.006493 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-service-ca\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.006682 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-trusted-ca-bundle\") pod \"console-77f454d56c-cxqfd\" (UID: 
\"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.006737 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-console-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.007467 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/f909c312-e835-429a-9f64-9a9f308c08a3-oauth-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.013927 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-serving-cert\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.016233 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/f909c312-e835-429a-9f64-9a9f308c08a3-console-oauth-config\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.024856 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qrm57\" (UniqueName: \"kubernetes.io/projected/f909c312-e835-429a-9f64-9a9f308c08a3-kube-api-access-qrm57\") pod \"console-77f454d56c-cxqfd\" (UID: \"f909c312-e835-429a-9f64-9a9f308c08a3\") " pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.071257 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh"] Jan 23 07:08:06 crc kubenswrapper[5102]: W0123 07:08:06.080923 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod26613359_769b_4d8c_846b_aafc773eec15.slice/crio-0b391b4e948539e5da45699ed5ea5ce4b63925f84d4e67201c85bf6a7d5880fa WatchSource:0}: Error finding container 0b391b4e948539e5da45699ed5ea5ce4b63925f84d4e67201c85bf6a7d5880fa: Status 404 returned error can't find the container with id 0b391b4e948539e5da45699ed5ea5ce4b63925f84d4e67201c85bf6a7d5880fa Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.130788 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7"] Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.135067 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:06 crc kubenswrapper[5102]: W0123 07:08:06.137723 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0591e1d6_98eb_4fbe_b102_d420dfb1dd4a.slice/crio-7556ef61297b37672b20ceb79304d45e5fc3d19c5e93275c12495914f7a3c278 WatchSource:0}: Error finding container 7556ef61297b37672b20ceb79304d45e5fc3d19c5e93275c12495914f7a3c278: Status 404 returned error can't find the container with id 7556ef61297b37672b20ceb79304d45e5fc3d19c5e93275c12495914f7a3c278 Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.213955 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-nmstate/nmstate-metrics-54757c584b-5zbpg"] Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.322011 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-77f454d56c-cxqfd"] Jan 23 07:08:06 crc kubenswrapper[5102]: W0123 07:08:06.326082 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf909c312_e835_429a_9f64_9a9f308c08a3.slice/crio-9fdf51587ba6ca8e55ef0a14e92724696f862f7977240e64679893fbae8ff141 WatchSource:0}: Error finding container 9fdf51587ba6ca8e55ef0a14e92724696f862f7977240e64679893fbae8ff141: Status 404 returned error can't find the container with id 9fdf51587ba6ca8e55ef0a14e92724696f862f7977240e64679893fbae8ff141 Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.644301 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-77f454d56c-cxqfd" event={"ID":"f909c312-e835-429a-9f64-9a9f308c08a3","Type":"ContainerStarted","Data":"9fdf51587ba6ca8e55ef0a14e92724696f862f7977240e64679893fbae8ff141"} Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.646418 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" event={"ID":"26613359-769b-4d8c-846b-aafc773eec15","Type":"ContainerStarted","Data":"0b391b4e948539e5da45699ed5ea5ce4b63925f84d4e67201c85bf6a7d5880fa"} Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.647914 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" event={"ID":"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a","Type":"ContainerStarted","Data":"7556ef61297b37672b20ceb79304d45e5fc3d19c5e93275c12495914f7a3c278"} Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.649707 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-handler-zj4xz" event={"ID":"d3b2043c-f890-4def-9d04-857501627d4d","Type":"ContainerStarted","Data":"9f59d17e6125feb020d1d1d0fcf552fe4480ec1ca3856e057d6e21814bd12580"} Jan 23 07:08:06 crc kubenswrapper[5102]: I0123 07:08:06.651440 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" event={"ID":"b517d1cc-d6a8-4857-a414-25efdbfc523f","Type":"ContainerStarted","Data":"25b7d640607f260e90129ba2e425a17bff26ede1156f38a8637f83903550f188"} Jan 23 07:08:07 crc kubenswrapper[5102]: I0123 07:08:07.466169 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:08:07 crc kubenswrapper[5102]: I0123 07:08:07.532658 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:08:07 crc kubenswrapper[5102]: I0123 
07:08:07.658026 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-77f454d56c-cxqfd" event={"ID":"f909c312-e835-429a-9f64-9a9f308c08a3","Type":"ContainerStarted","Data":"37a6036252384da5d1de5b4c4a839e80bf1bf72a66154550b21965f39680b6e9"} Jan 23 07:08:07 crc kubenswrapper[5102]: I0123 07:08:07.679695 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-77f454d56c-cxqfd" podStartSLOduration=2.679677153 podStartE2EDuration="2.679677153s" podCreationTimestamp="2026-01-23 07:08:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:08:07.678016041 +0000 UTC m=+838.498365006" watchObservedRunningTime="2026-01-23 07:08:07.679677153 +0000 UTC m=+838.500026128" Jan 23 07:08:07 crc kubenswrapper[5102]: I0123 07:08:07.693071 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:08:08 crc kubenswrapper[5102]: I0123 07:08:08.687343 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pb5mz" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="registry-server" containerID="cri-o://2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880" gracePeriod=2 Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.025070 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.044173 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-26v2l\" (UniqueName: \"kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l\") pod \"193770b3-f315-42b1-aace-f67be7f43e88\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.044263 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content\") pod \"193770b3-f315-42b1-aace-f67be7f43e88\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.044334 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities\") pod \"193770b3-f315-42b1-aace-f67be7f43e88\" (UID: \"193770b3-f315-42b1-aace-f67be7f43e88\") " Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.045274 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities" (OuterVolumeSpecName: "utilities") pod "193770b3-f315-42b1-aace-f67be7f43e88" (UID: "193770b3-f315-42b1-aace-f67be7f43e88"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.048936 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l" (OuterVolumeSpecName: "kube-api-access-26v2l") pod "193770b3-f315-42b1-aace-f67be7f43e88" (UID: "193770b3-f315-42b1-aace-f67be7f43e88"). InnerVolumeSpecName "kube-api-access-26v2l". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.145525 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-26v2l\" (UniqueName: \"kubernetes.io/projected/193770b3-f315-42b1-aace-f67be7f43e88-kube-api-access-26v2l\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.145661 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.157471 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "193770b3-f315-42b1-aace-f67be7f43e88" (UID: "193770b3-f315-42b1-aace-f67be7f43e88"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.246249 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/193770b3-f315-42b1-aace-f67be7f43e88-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.698384 5102 generic.go:334] "Generic (PLEG): container finished" podID="193770b3-f315-42b1-aace-f67be7f43e88" containerID="2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880" exitCode=0 Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.698498 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pb5mz" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.698494 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerDied","Data":"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.699284 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pb5mz" event={"ID":"193770b3-f315-42b1-aace-f67be7f43e88","Type":"ContainerDied","Data":"2817effdf1fd2015fb17c4b11f3f2962740ddee973b913c1526abea7223fd3f1"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.699322 5102 scope.go:117] "RemoveContainer" containerID="2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.704924 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" event={"ID":"26613359-769b-4d8c-846b-aafc773eec15","Type":"ContainerStarted","Data":"1d587320968313ed2c5c80574fdc26f41b5076267a795611ed660695cab620ff"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.704987 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.706278 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" event={"ID":"0591e1d6-98eb-4fbe-b102-d420dfb1dd4a","Type":"ContainerStarted","Data":"4efb8f4d32443010f946eadfebd3d5733a1aa2b30f4fc63906f0c40bbc79acc5"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.712081 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-nmstate/nmstate-handler-zj4xz" event={"ID":"d3b2043c-f890-4def-9d04-857501627d4d","Type":"ContainerStarted","Data":"402abfca04b0ffb86de8db296f05c83462190633a9d598b1212a6992fa53bca9"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.713255 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.720004 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" event={"ID":"b517d1cc-d6a8-4857-a414-25efdbfc523f","Type":"ContainerStarted","Data":"e06a11f34e437a28662d542aa575e2a58aed8977cf5f1bc6a2323663b41e2a41"} Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.724477 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" podStartSLOduration=2.149775504 podStartE2EDuration="4.724459655s" podCreationTimestamp="2026-01-23 07:08:05 +0000 UTC" firstStartedPulling="2026-01-23 07:08:06.083002347 +0000 UTC m=+836.903351322" lastFinishedPulling="2026-01-23 07:08:08.657686498 +0000 UTC m=+839.478035473" observedRunningTime="2026-01-23 07:08:09.723617448 +0000 UTC m=+840.543966433" watchObservedRunningTime="2026-01-23 07:08:09.724459655 +0000 UTC m=+840.544808640" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.728769 5102 scope.go:117] "RemoveContainer" containerID="b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.753598 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.761775 5102 scope.go:117] "RemoveContainer" containerID="4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.761926 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pb5mz"] Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.781070 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-handler-zj4xz" podStartSLOduration=1.993896736 podStartE2EDuration="4.781042127s" podCreationTimestamp="2026-01-23 07:08:05 +0000 UTC" firstStartedPulling="2026-01-23 07:08:05.868925225 +0000 UTC m=+836.689274210" lastFinishedPulling="2026-01-23 07:08:08.656070616 +0000 UTC m=+839.476419601" observedRunningTime="2026-01-23 07:08:09.776892116 +0000 UTC m=+840.597241101" watchObservedRunningTime="2026-01-23 07:08:09.781042127 +0000 UTC m=+840.601391102" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.788632 5102 scope.go:117] "RemoveContainer" containerID="2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880" Jan 23 07:08:09 crc kubenswrapper[5102]: E0123 07:08:09.789132 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880\": container with ID starting with 2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880 not found: ID does not exist" containerID="2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.789189 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880"} err="failed to get 
container status \"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880\": rpc error: code = NotFound desc = could not find container \"2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880\": container with ID starting with 2bb5618ffd42fa9bb33457859644713012c8cf09e9df5591049cada0fb2c5880 not found: ID does not exist" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.789226 5102 scope.go:117] "RemoveContainer" containerID="b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60" Jan 23 07:08:09 crc kubenswrapper[5102]: E0123 07:08:09.790760 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60\": container with ID starting with b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60 not found: ID does not exist" containerID="b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.790827 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60"} err="failed to get container status \"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60\": rpc error: code = NotFound desc = could not find container \"b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60\": container with ID starting with b4a7590bb06135e9b434bc6302ef5136b1675d92d55b2a63c14e7afcdb703d60 not found: ID does not exist" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.790868 5102 scope.go:117] "RemoveContainer" containerID="4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3" Jan 23 07:08:09 crc kubenswrapper[5102]: E0123 07:08:09.791460 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3\": container with ID starting with 4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3 not found: ID does not exist" containerID="4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.792613 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3"} err="failed to get container status \"4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3\": rpc error: code = NotFound desc = could not find container \"4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3\": container with ID starting with 4a29dd9e61e2caa100fd7659dd66ba7bccea76dd16cc593e03e15a6f6921a8c3 not found: ID does not exist" Jan 23 07:08:09 crc kubenswrapper[5102]: I0123 07:08:09.797321 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-console-plugin-7754f76f8b-v7xk7" podStartSLOduration=2.282443338 podStartE2EDuration="4.797303543s" podCreationTimestamp="2026-01-23 07:08:05 +0000 UTC" firstStartedPulling="2026-01-23 07:08:06.142903165 +0000 UTC m=+836.963252140" lastFinishedPulling="2026-01-23 07:08:08.65776337 +0000 UTC m=+839.478112345" observedRunningTime="2026-01-23 07:08:09.796519838 +0000 UTC m=+840.616868823" watchObservedRunningTime="2026-01-23 07:08:09.797303543 +0000 UTC m=+840.617652518" Jan 23 07:08:11 crc kubenswrapper[5102]: I0123 07:08:11.610259 5102 kubelet_volumes.go:163] 
"Cleaned up orphaned pod volumes dir" podUID="193770b3-f315-42b1-aace-f67be7f43e88" path="/var/lib/kubelet/pods/193770b3-f315-42b1-aace-f67be7f43e88/volumes" Jan 23 07:08:11 crc kubenswrapper[5102]: I0123 07:08:11.732469 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" event={"ID":"b517d1cc-d6a8-4857-a414-25efdbfc523f","Type":"ContainerStarted","Data":"01ef6199561e9a4b788a54e85447e95540ce6c85c3ab1db141282d4d8738132c"} Jan 23 07:08:11 crc kubenswrapper[5102]: I0123 07:08:11.751784 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-nmstate/nmstate-metrics-54757c584b-5zbpg" podStartSLOduration=1.474682828 podStartE2EDuration="6.751763704s" podCreationTimestamp="2026-01-23 07:08:05 +0000 UTC" firstStartedPulling="2026-01-23 07:08:06.230531562 +0000 UTC m=+837.050880537" lastFinishedPulling="2026-01-23 07:08:11.507612408 +0000 UTC m=+842.327961413" observedRunningTime="2026-01-23 07:08:11.750192774 +0000 UTC m=+842.570541769" watchObservedRunningTime="2026-01-23 07:08:11.751763704 +0000 UTC m=+842.572112699" Jan 23 07:08:15 crc kubenswrapper[5102]: I0123 07:08:15.873823 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-handler-zj4xz" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.136085 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.136824 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.143902 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.768752 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.768828 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.768889 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.769797 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.769909 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" 
containerID="cri-o://a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3" gracePeriod=600 Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.776210 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-77f454d56c-cxqfd" Jan 23 07:08:16 crc kubenswrapper[5102]: I0123 07:08:16.848568 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"] Jan 23 07:08:17 crc kubenswrapper[5102]: I0123 07:08:17.777782 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3" exitCode=0 Jan 23 07:08:17 crc kubenswrapper[5102]: I0123 07:08:17.777909 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3"} Jan 23 07:08:17 crc kubenswrapper[5102]: I0123 07:08:17.778120 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31"} Jan 23 07:08:17 crc kubenswrapper[5102]: I0123 07:08:17.778334 5102 scope.go:117] "RemoveContainer" containerID="8e41675d87b3cf42aaa00755aec44fa42fed44d601cbcf5f4d3eb3b79c2bf254" Jan 23 07:08:21 crc kubenswrapper[5102]: E0123 07:08:21.233106 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/system.slice/systemd-tmpfiles-clean.service\": RecentStats: unable to find data in memory cache]" Jan 23 07:08:25 crc kubenswrapper[5102]: I0123 07:08:25.838001 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-nmstate/nmstate-webhook-8474b5b9d8-fhrhh" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.541653 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56"] Jan 23 07:08:40 crc kubenswrapper[5102]: E0123 07:08:40.544637 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="extract-content" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.544741 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="extract-content" Jan 23 07:08:40 crc kubenswrapper[5102]: E0123 07:08:40.544839 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="extract-utilities" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.544903 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="extract-utilities" Jan 23 07:08:40 crc kubenswrapper[5102]: E0123 07:08:40.544964 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="registry-server" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.545033 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="registry-server" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.545206 5102 memory_manager.go:354] "RemoveStaleState removing 
state" podUID="193770b3-f315-42b1-aace-f67be7f43e88" containerName="registry-server" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.546275 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.548891 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.556088 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56"] Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.625181 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.625475 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cjh72\" (UniqueName: \"kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.625647 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.726693 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.726988 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cjh72\" (UniqueName: \"kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.727144 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 
07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.727517 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.727696 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.753555 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cjh72\" (UniqueName: \"kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72\") pod \"270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:40 crc kubenswrapper[5102]: I0123 07:08:40.867224 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:41 crc kubenswrapper[5102]: I0123 07:08:41.122120 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56"] Jan 23 07:08:41 crc kubenswrapper[5102]: E0123 07:08:41.536125 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d7a9743_8694_4eb1_8a4a_75d0264cc0a9.slice/crio-conmon-ff5ee590039f7469b8a08825ccace5d951f1e544a7d82e9c54778081abe34d50.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9d7a9743_8694_4eb1_8a4a_75d0264cc0a9.slice/crio-ff5ee590039f7469b8a08825ccace5d951f1e544a7d82e9c54778081abe34d50.scope\": RecentStats: unable to find data in memory cache]" Jan 23 07:08:41 crc kubenswrapper[5102]: I0123 07:08:41.913973 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/console-f9d7485db-k7ghh" podUID="4578060b-5283-42ea-aa38-c925d4265270" containerName="console" containerID="cri-o://fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3" gracePeriod=15 Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.000494 5102 generic.go:334] "Generic (PLEG): container finished" podID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerID="ff5ee590039f7469b8a08825ccace5d951f1e544a7d82e9c54778081abe34d50" exitCode=0 Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.000588 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" event={"ID":"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9","Type":"ContainerDied","Data":"ff5ee590039f7469b8a08825ccace5d951f1e544a7d82e9c54778081abe34d50"} Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.000639 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" event={"ID":"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9","Type":"ContainerStarted","Data":"de43bbe00bdb883c1e7b0e71efd880ef9d579ddbc77bce36fed8003376ee0650"} Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.340428 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-k7ghh_4578060b-5283-42ea-aa38-c925d4265270/console/0.log" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.340519 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458664 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z7r4z\" (UniqueName: \"kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458741 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458797 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458845 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458876 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458935 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.458958 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config\") pod \"4578060b-5283-42ea-aa38-c925d4265270\" (UID: \"4578060b-5283-42ea-aa38-c925d4265270\") " Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.459894 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). 
InnerVolumeSpecName "oauth-serving-cert". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.459904 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.460843 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config" (OuterVolumeSpecName: "console-config") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.460885 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca" (OuterVolumeSpecName: "service-ca") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.466817 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.467248 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z" (OuterVolumeSpecName: "kube-api-access-z7r4z") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "kube-api-access-z7r4z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.467518 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "4578060b-5283-42ea-aa38-c925d4265270" (UID: "4578060b-5283-42ea-aa38-c925d4265270"). InnerVolumeSpecName "console-oauth-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561313 5102 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-service-ca\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561357 5102 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-oauth-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561372 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z7r4z\" (UniqueName: \"kubernetes.io/projected/4578060b-5283-42ea-aa38-c925d4265270-kube-api-access-z7r4z\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561383 5102 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561394 5102 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4578060b-5283-42ea-aa38-c925d4265270-console-serving-cert\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561403 5102 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:42 crc kubenswrapper[5102]: I0123 07:08:42.561415 5102 reconciler_common.go:293] "Volume detached for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4578060b-5283-42ea-aa38-c925d4265270-console-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.009824 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-console_console-f9d7485db-k7ghh_4578060b-5283-42ea-aa38-c925d4265270/console/0.log" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.009919 5102 generic.go:334] "Generic (PLEG): container finished" podID="4578060b-5283-42ea-aa38-c925d4265270" containerID="fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3" exitCode=2 Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.009965 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-k7ghh" event={"ID":"4578060b-5283-42ea-aa38-c925d4265270","Type":"ContainerDied","Data":"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3"} Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.010014 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-k7ghh" event={"ID":"4578060b-5283-42ea-aa38-c925d4265270","Type":"ContainerDied","Data":"5b0c0927077f3de1316f4117793f10c51397e71c1b20ce4f4375ab5e4aa49c59"} Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.010040 5102 scope.go:117] "RemoveContainer" containerID="fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.010086 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/console-f9d7485db-k7ghh" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.051836 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"] Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.057783 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-console/console-f9d7485db-k7ghh"] Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.061473 5102 scope.go:117] "RemoveContainer" containerID="fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3" Jan 23 07:08:43 crc kubenswrapper[5102]: E0123 07:08:43.062104 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3\": container with ID starting with fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3 not found: ID does not exist" containerID="fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.062408 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3"} err="failed to get container status \"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3\": rpc error: code = NotFound desc = could not find container \"fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3\": container with ID starting with fdf58cb7f8ba5689a54e8a571a84ae28f4b45cba74740322fa99cff6fb922fd3 not found: ID does not exist" Jan 23 07:08:43 crc kubenswrapper[5102]: I0123 07:08:43.606357 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4578060b-5283-42ea-aa38-c925d4265270" path="/var/lib/kubelet/pods/4578060b-5283-42ea-aa38-c925d4265270/volumes" Jan 23 07:08:44 crc kubenswrapper[5102]: I0123 07:08:44.021407 5102 generic.go:334] "Generic (PLEG): container finished" podID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerID="70be08df98dc182fd515561f72b44890bf410f72a172c4ea48651b111d4cfb67" exitCode=0 Jan 23 07:08:44 crc kubenswrapper[5102]: I0123 07:08:44.021472 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" event={"ID":"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9","Type":"ContainerDied","Data":"70be08df98dc182fd515561f72b44890bf410f72a172c4ea48651b111d4cfb67"} Jan 23 07:08:45 crc kubenswrapper[5102]: I0123 07:08:45.033433 5102 generic.go:334] "Generic (PLEG): container finished" podID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerID="f2d6460d07ecf81cd57337129bcf62e4798af5fb7e095d3a4bd0d27ac0cc7ae2" exitCode=0 Jan 23 07:08:45 crc kubenswrapper[5102]: I0123 07:08:45.033499 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" event={"ID":"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9","Type":"ContainerDied","Data":"f2d6460d07ecf81cd57337129bcf62e4798af5fb7e095d3a4bd0d27ac0cc7ae2"} Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.306733 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.425149 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle\") pod \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.425307 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cjh72\" (UniqueName: \"kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72\") pod \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.425425 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util\") pod \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\" (UID: \"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9\") " Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.427296 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle" (OuterVolumeSpecName: "bundle") pod "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" (UID: "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.436381 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72" (OuterVolumeSpecName: "kube-api-access-cjh72") pod "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" (UID: "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9"). InnerVolumeSpecName "kube-api-access-cjh72". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.527711 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cjh72\" (UniqueName: \"kubernetes.io/projected/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-kube-api-access-cjh72\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.527758 5102 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.665389 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util" (OuterVolumeSpecName: "util") pod "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" (UID: "9d7a9743-8694-4eb1-8a4a-75d0264cc0a9"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:08:46 crc kubenswrapper[5102]: I0123 07:08:46.730073 5102 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/9d7a9743-8694-4eb1-8a4a-75d0264cc0a9-util\") on node \"crc\" DevicePath \"\"" Jan 23 07:08:47 crc kubenswrapper[5102]: I0123 07:08:47.049444 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" event={"ID":"9d7a9743-8694-4eb1-8a4a-75d0264cc0a9","Type":"ContainerDied","Data":"de43bbe00bdb883c1e7b0e71efd880ef9d579ddbc77bce36fed8003376ee0650"} Jan 23 07:08:47 crc kubenswrapper[5102]: I0123 07:08:47.049810 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="de43bbe00bdb883c1e7b0e71efd880ef9d579ddbc77bce36fed8003376ee0650" Jan 23 07:08:47 crc kubenswrapper[5102]: I0123 07:08:47.049616 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.282133 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-g927s"] Jan 23 07:08:54 crc kubenswrapper[5102]: E0123 07:08:54.282977 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="pull" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.282993 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="pull" Jan 23 07:08:54 crc kubenswrapper[5102]: E0123 07:08:54.283021 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4578060b-5283-42ea-aa38-c925d4265270" containerName="console" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283028 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4578060b-5283-42ea-aa38-c925d4265270" containerName="console" Jan 23 07:08:54 crc kubenswrapper[5102]: E0123 07:08:54.283035 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="util" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283043 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="util" Jan 23 07:08:54 crc kubenswrapper[5102]: E0123 07:08:54.283055 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="extract" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283061 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="extract" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283158 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4578060b-5283-42ea-aa38-c925d4265270" containerName="console" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283174 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d7a9743-8694-4eb1-8a4a-75d0264cc0a9" containerName="extract" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.283907 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.302252 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g927s"] Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.446825 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4l85n\" (UniqueName: \"kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.446874 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-catalog-content\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.446899 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.548410 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4l85n\" (UniqueName: \"kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.548579 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-catalog-content\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.548634 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.549274 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-catalog-content\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.549443 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.576646 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4l85n\" (UniqueName: \"kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n\") pod \"certified-operators-g927s\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") " pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.602120 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:08:54 crc kubenswrapper[5102]: I0123 07:08:54.913045 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-g927s"] Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.106287 5102 generic.go:334] "Generic (PLEG): container finished" podID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerID="26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad" exitCode=0 Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.106347 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerDied","Data":"26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad"} Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.106380 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerStarted","Data":"231b12e47a049ffa3cf209b1a52c700ba6ecc5f0ce1d4a5128b68e6582cae0b9"} Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.720651 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6"] Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.722120 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.727963 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-cert" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.736617 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"openshift-service-ca.crt" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.736642 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"kube-root-ca.crt" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.736719 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-controller-manager-service-cert" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.736658 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"manager-account-dockercfg-z74vd" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.779237 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-webhook-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.779306 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-apiservice-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.779356 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q276p\" (UniqueName: \"kubernetes.io/projected/d8a876cd-3c97-46c3-9633-62bb2f06664a-kube-api-access-q276p\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.871782 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6"] Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.880296 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q276p\" (UniqueName: \"kubernetes.io/projected/d8a876cd-3c97-46c3-9633-62bb2f06664a-kube-api-access-q276p\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.880404 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-webhook-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.880441 5102 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-apiservice-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.888530 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-apiservice-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.889010 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/d8a876cd-3c97-46c3-9633-62bb2f06664a-webhook-cert\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:55 crc kubenswrapper[5102]: I0123 07:08:55.903348 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q276p\" (UniqueName: \"kubernetes.io/projected/d8a876cd-3c97-46c3-9633-62bb2f06664a-kube-api-access-q276p\") pod \"metallb-operator-controller-manager-6f968486d9-tlfn6\" (UID: \"d8a876cd-3c97-46c3-9633-62bb2f06664a\") " pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.037968 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.179828 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"] Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.183418 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.190272 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-dockercfg-h77s9" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.193219 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-operator-webhook-server-service-cert" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.197178 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.216642 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"] Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.287016 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-apiservice-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.287110 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-webhook-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.287154 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xzjjp\" (UniqueName: \"kubernetes.io/projected/6452fc17-0e56-40dc-a2cc-5637175b0b81-kube-api-access-xzjjp\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.388856 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-webhook-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.388905 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xzjjp\" (UniqueName: \"kubernetes.io/projected/6452fc17-0e56-40dc-a2cc-5637175b0b81-kube-api-access-xzjjp\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.388943 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-apiservice-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 
Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.395612 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-apiservice-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"
Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.397829 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/6452fc17-0e56-40dc-a2cc-5637175b0b81-webhook-cert\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"
Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.410229 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6"]
Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.428250 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xzjjp\" (UniqueName: \"kubernetes.io/projected/6452fc17-0e56-40dc-a2cc-5637175b0b81-kube-api-access-xzjjp\") pod \"metallb-operator-webhook-server-5557795bdd-9p5gg\" (UID: \"6452fc17-0e56-40dc-a2cc-5637175b0b81\") " pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"
Jan 23 07:08:56 crc kubenswrapper[5102]: W0123 07:08:56.456675 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8a876cd_3c97_46c3_9633_62bb2f06664a.slice/crio-20bff0d6b78af3701dd5247733617ce2e473090b88c69e888f23fd5e5b02a472 WatchSource:0}: Error finding container 20bff0d6b78af3701dd5247733617ce2e473090b88c69e888f23fd5e5b02a472: Status 404 returned error can't find the container with id 20bff0d6b78af3701dd5247733617ce2e473090b88c69e888f23fd5e5b02a472
Need to start a new one" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:08:56 crc kubenswrapper[5102]: I0123 07:08:56.879661 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg"] Jan 23 07:08:56 crc kubenswrapper[5102]: W0123 07:08:56.891442 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6452fc17_0e56_40dc_a2cc_5637175b0b81.slice/crio-47da894dd5abd8d1bfef3485b0f60271f6fd9dacc4a0cbc6d586990fd39aa3f4 WatchSource:0}: Error finding container 47da894dd5abd8d1bfef3485b0f60271f6fd9dacc4a0cbc6d586990fd39aa3f4: Status 404 returned error can't find the container with id 47da894dd5abd8d1bfef3485b0f60271f6fd9dacc4a0cbc6d586990fd39aa3f4 Jan 23 07:08:57 crc kubenswrapper[5102]: I0123 07:08:57.124372 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" event={"ID":"d8a876cd-3c97-46c3-9633-62bb2f06664a","Type":"ContainerStarted","Data":"20bff0d6b78af3701dd5247733617ce2e473090b88c69e888f23fd5e5b02a472"} Jan 23 07:08:57 crc kubenswrapper[5102]: I0123 07:08:57.127916 5102 generic.go:334] "Generic (PLEG): container finished" podID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerID="17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7" exitCode=0 Jan 23 07:08:57 crc kubenswrapper[5102]: I0123 07:08:57.128047 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerDied","Data":"17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7"} Jan 23 07:08:57 crc kubenswrapper[5102]: I0123 07:08:57.129447 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" event={"ID":"6452fc17-0e56-40dc-a2cc-5637175b0b81","Type":"ContainerStarted","Data":"47da894dd5abd8d1bfef3485b0f60271f6fd9dacc4a0cbc6d586990fd39aa3f4"} Jan 23 07:08:58 crc kubenswrapper[5102]: I0123 07:08:58.137833 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerStarted","Data":"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966"} Jan 23 07:08:58 crc kubenswrapper[5102]: I0123 07:08:58.165108 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-g927s" podStartSLOduration=1.475432896 podStartE2EDuration="4.165082413s" podCreationTimestamp="2026-01-23 07:08:54 +0000 UTC" firstStartedPulling="2026-01-23 07:08:55.108176078 +0000 UTC m=+885.928525053" lastFinishedPulling="2026-01-23 07:08:57.797825595 +0000 UTC m=+888.618174570" observedRunningTime="2026-01-23 07:08:58.164069253 +0000 UTC m=+888.984418238" watchObservedRunningTime="2026-01-23 07:08:58.165082413 +0000 UTC m=+888.985431388" Jan 23 07:09:01 crc kubenswrapper[5102]: I0123 07:09:01.168024 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" event={"ID":"d8a876cd-3c97-46c3-9633-62bb2f06664a","Type":"ContainerStarted","Data":"1a7f474850864aaf18661fa3fb4fca601a98a4ca3354461cadcc09fd3a1630a1"} Jan 23 07:09:01 crc kubenswrapper[5102]: I0123 07:09:01.170155 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:09:01 crc kubenswrapper[5102]: I0123 07:09:01.213177 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" podStartSLOduration=2.386385248 podStartE2EDuration="6.213144917s" podCreationTimestamp="2026-01-23 07:08:55 +0000 UTC" firstStartedPulling="2026-01-23 07:08:56.464291358 +0000 UTC m=+887.284640333" lastFinishedPulling="2026-01-23 07:09:00.291051017 +0000 UTC m=+891.111400002" observedRunningTime="2026-01-23 07:09:01.19795158 +0000 UTC m=+892.018300575" watchObservedRunningTime="2026-01-23 07:09:01.213144917 +0000 UTC m=+892.033493892" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.481648 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.483029 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.495452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.505666 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xkfb4\" (UniqueName: \"kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.506015 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.506161 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.607200 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xkfb4\" (UniqueName: \"kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.607400 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.607453 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.608224 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.608814 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.635808 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xkfb4\" (UniqueName: \"kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4\") pod \"community-operators-685pf\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:02 crc kubenswrapper[5102]: I0123 07:09:02.800767 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:03 crc kubenswrapper[5102]: I0123 07:09:03.174652 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:03 crc kubenswrapper[5102]: I0123 07:09:03.180689 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" event={"ID":"6452fc17-0e56-40dc-a2cc-5637175b0b81","Type":"ContainerStarted","Data":"09c43ca209a1bc6235cd0a34d6c84cf116aebb6bf312d1bf2e13b9b6e4c6e3c0"} Jan 23 07:09:03 crc kubenswrapper[5102]: I0123 07:09:03.180929 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:09:03 crc kubenswrapper[5102]: I0123 07:09:03.209776 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" podStartSLOduration=1.264293259 podStartE2EDuration="7.209746318s" podCreationTimestamp="2026-01-23 07:08:56 +0000 UTC" firstStartedPulling="2026-01-23 07:08:56.904205642 +0000 UTC m=+887.724554617" lastFinishedPulling="2026-01-23 07:09:02.849658701 +0000 UTC m=+893.670007676" observedRunningTime="2026-01-23 07:09:03.204436772 +0000 UTC m=+894.024785747" watchObservedRunningTime="2026-01-23 07:09:03.209746318 +0000 UTC m=+894.030095293" Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.190893 5102 generic.go:334] "Generic (PLEG): container finished" podID="f9815288-967c-449d-a217-41bc899f9474" containerID="e3a15f895d738c986ffbf860f4e1b624a4a5eb0fd948e1105bd8788546633537" exitCode=0 Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.191017 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerDied","Data":"e3a15f895d738c986ffbf860f4e1b624a4a5eb0fd948e1105bd8788546633537"} Jan 23 07:09:04 crc kubenswrapper[5102]: 
Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.191617 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerStarted","Data":"8aa62efb1d207188dba6b34e7dcfcdc7214208bcd7b439453e66d0240e5f8786"}
Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.602705 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-g927s"
Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.602789 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-g927s"
Jan 23 07:09:04 crc kubenswrapper[5102]: I0123 07:09:04.668410 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-g927s"
Jan 23 07:09:05 crc kubenswrapper[5102]: I0123 07:09:05.201642 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerStarted","Data":"b838a66474f68c91efb914ca1deacd207700d7de5130c34e5458ab092a9396e8"}
Jan 23 07:09:05 crc kubenswrapper[5102]: I0123 07:09:05.256051 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-g927s"
Jan 23 07:09:06 crc kubenswrapper[5102]: I0123 07:09:06.209278 5102 generic.go:334] "Generic (PLEG): container finished" podID="f9815288-967c-449d-a217-41bc899f9474" containerID="b838a66474f68c91efb914ca1deacd207700d7de5130c34e5458ab092a9396e8" exitCode=0
Jan 23 07:09:06 crc kubenswrapper[5102]: I0123 07:09:06.209416 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerDied","Data":"b838a66474f68c91efb914ca1deacd207700d7de5130c34e5458ab092a9396e8"}
Jan 23 07:09:07 crc kubenswrapper[5102]: I0123 07:09:07.061379 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g927s"]
Jan 23 07:09:07 crc kubenswrapper[5102]: I0123 07:09:07.220830 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerStarted","Data":"87ba9c3da83327c6810aa6ba31652cb4a8df133b498a7868c6554fa4f657e317"}
Jan 23 07:09:07 crc kubenswrapper[5102]: I0123 07:09:07.221055 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-g927s" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="registry-server" containerID="cri-o://6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966" gracePeriod=2
Jan 23 07:09:07 crc kubenswrapper[5102]: I0123 07:09:07.243166 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-685pf" podStartSLOduration=2.8023586419999997 podStartE2EDuration="5.243134501s" podCreationTimestamp="2026-01-23 07:09:02 +0000 UTC" firstStartedPulling="2026-01-23 07:09:04.193943694 +0000 UTC m=+895.014292699" lastFinishedPulling="2026-01-23 07:09:06.634719573 +0000 UTC m=+897.455068558" observedRunningTime="2026-01-23 07:09:07.242695758 +0000 UTC m=+898.063044733" watchObservedRunningTime="2026-01-23 07:09:07.243134501 +0000 UTC m=+898.063483476"
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.771270 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g927s"
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.906839 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities\") pod \"f31f0788-6705-452e-993e-c81bc3cd8b5e\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") "
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.906916 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-catalog-content\") pod \"f31f0788-6705-452e-993e-c81bc3cd8b5e\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") "
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.906968 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4l85n\" (UniqueName: \"kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n\") pod \"f31f0788-6705-452e-993e-c81bc3cd8b5e\" (UID: \"f31f0788-6705-452e-993e-c81bc3cd8b5e\") "
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.908525 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities" (OuterVolumeSpecName: "utilities") pod "f31f0788-6705-452e-993e-c81bc3cd8b5e" (UID: "f31f0788-6705-452e-993e-c81bc3cd8b5e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:09:08 crc kubenswrapper[5102]: I0123 07:09:08.916706 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n" (OuterVolumeSpecName: "kube-api-access-4l85n") pod "f31f0788-6705-452e-993e-c81bc3cd8b5e" (UID: "f31f0788-6705-452e-993e-c81bc3cd8b5e"). InnerVolumeSpecName "kube-api-access-4l85n". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.009401 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4l85n\" (UniqueName: \"kubernetes.io/projected/f31f0788-6705-452e-993e-c81bc3cd8b5e-kube-api-access-4l85n\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.009453 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.009464 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f31f0788-6705-452e-993e-c81bc3cd8b5e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.237834 5102 generic.go:334] "Generic (PLEG): container finished" podID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerID="6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966" exitCode=0 Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.237893 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerDied","Data":"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966"} Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.237907 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-g927s" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.237939 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-g927s" event={"ID":"f31f0788-6705-452e-993e-c81bc3cd8b5e","Type":"ContainerDied","Data":"231b12e47a049ffa3cf209b1a52c700ba6ecc5f0ce1d4a5128b68e6582cae0b9"} Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.237962 5102 scope.go:117] "RemoveContainer" containerID="6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.258574 5102 scope.go:117] "RemoveContainer" containerID="17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.273291 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-g927s"] Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.278519 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-g927s"] Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.289758 5102 scope.go:117] "RemoveContainer" containerID="26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.305568 5102 scope.go:117] "RemoveContainer" containerID="6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966" Jan 23 07:09:09 crc kubenswrapper[5102]: E0123 07:09:09.306257 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966\": container with ID starting with 6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966 not found: ID does not exist" containerID="6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966" Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.306317 
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.306317 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966"} err="failed to get container status \"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966\": rpc error: code = NotFound desc = could not find container \"6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966\": container with ID starting with 6d48eac572eed5a5e980b9eff379b28e74c576c5b70c13684c54902d97acc966 not found: ID does not exist"
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.306360 5102 scope.go:117] "RemoveContainer" containerID="17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7"
Jan 23 07:09:09 crc kubenswrapper[5102]: E0123 07:09:09.307016 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7\": container with ID starting with 17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7 not found: ID does not exist" containerID="17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7"
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.307053 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7"} err="failed to get container status \"17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7\": rpc error: code = NotFound desc = could not find container \"17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7\": container with ID starting with 17aa5d8a6b32d404155424a8170eb72de480e505b3c16d472dbf9b123a912fd7 not found: ID does not exist"
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.307081 5102 scope.go:117] "RemoveContainer" containerID="26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad"
Jan 23 07:09:09 crc kubenswrapper[5102]: E0123 07:09:09.307383 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad\": container with ID starting with 26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad not found: ID does not exist" containerID="26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad"
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.307441 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad"} err="failed to get container status \"26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad\": rpc error: code = NotFound desc = could not find container \"26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad\": container with ID starting with 26f3f4804193e2dfc6f6c9d39318ba6d13ea977e0226ae438997f04ca39bebad not found: ID does not exist"
Jan 23 07:09:09 crc kubenswrapper[5102]: I0123 07:09:09.608843 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" path="/var/lib/kubelet/pods/f31f0788-6705-452e-993e-c81bc3cd8b5e/volumes"
Jan 23 07:09:12 crc kubenswrapper[5102]: I0123 07:09:12.802198 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-685pf"
probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:12 crc kubenswrapper[5102]: I0123 07:09:12.863225 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:13 crc kubenswrapper[5102]: I0123 07:09:13.341313 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:15 crc kubenswrapper[5102]: I0123 07:09:15.263675 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:15 crc kubenswrapper[5102]: I0123 07:09:15.295113 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-685pf" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="registry-server" containerID="cri-o://87ba9c3da83327c6810aa6ba31652cb4a8df133b498a7868c6554fa4f657e317" gracePeriod=2 Jan 23 07:09:16 crc kubenswrapper[5102]: I0123 07:09:16.569046 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-webhook-server-5557795bdd-9p5gg" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.316249 5102 generic.go:334] "Generic (PLEG): container finished" podID="f9815288-967c-449d-a217-41bc899f9474" containerID="87ba9c3da83327c6810aa6ba31652cb4a8df133b498a7868c6554fa4f657e317" exitCode=0 Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.316345 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerDied","Data":"87ba9c3da83327c6810aa6ba31652cb4a8df133b498a7868c6554fa4f657e317"} Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.638819 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.752838 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content\") pod \"f9815288-967c-449d-a217-41bc899f9474\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.752964 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities\") pod \"f9815288-967c-449d-a217-41bc899f9474\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.753027 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xkfb4\" (UniqueName: \"kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4\") pod \"f9815288-967c-449d-a217-41bc899f9474\" (UID: \"f9815288-967c-449d-a217-41bc899f9474\") " Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.754470 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities" (OuterVolumeSpecName: "utilities") pod "f9815288-967c-449d-a217-41bc899f9474" (UID: "f9815288-967c-449d-a217-41bc899f9474"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.762071 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4" (OuterVolumeSpecName: "kube-api-access-xkfb4") pod "f9815288-967c-449d-a217-41bc899f9474" (UID: "f9815288-967c-449d-a217-41bc899f9474"). InnerVolumeSpecName "kube-api-access-xkfb4". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.811128 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f9815288-967c-449d-a217-41bc899f9474" (UID: "f9815288-967c-449d-a217-41bc899f9474"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.854915 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.854963 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xkfb4\" (UniqueName: \"kubernetes.io/projected/f9815288-967c-449d-a217-41bc899f9474-kube-api-access-xkfb4\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:17 crc kubenswrapper[5102]: I0123 07:09:17.854980 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f9815288-967c-449d-a217-41bc899f9474-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.327226 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-685pf" event={"ID":"f9815288-967c-449d-a217-41bc899f9474","Type":"ContainerDied","Data":"8aa62efb1d207188dba6b34e7dcfcdc7214208bcd7b439453e66d0240e5f8786"} Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.327307 5102 scope.go:117] "RemoveContainer" containerID="87ba9c3da83327c6810aa6ba31652cb4a8df133b498a7868c6554fa4f657e317" Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.327334 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-685pf" Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.356019 5102 scope.go:117] "RemoveContainer" containerID="b838a66474f68c91efb914ca1deacd207700d7de5130c34e5458ab092a9396e8" Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.402108 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.403166 5102 scope.go:117] "RemoveContainer" containerID="e3a15f895d738c986ffbf860f4e1b624a4a5eb0fd948e1105bd8788546633537" Jan 23 07:09:18 crc kubenswrapper[5102]: I0123 07:09:18.415983 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-685pf"] Jan 23 07:09:19 crc kubenswrapper[5102]: I0123 07:09:19.612460 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f9815288-967c-449d-a217-41bc899f9474" path="/var/lib/kubelet/pods/f9815288-967c-449d-a217-41bc899f9474/volumes" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.722342 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.723786 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.723821 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.723870 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.723889 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.723926 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="extract-utilities" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.723941 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="extract-utilities" Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.723962 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="extract-utilities" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.723976 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="extract-utilities" Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.723994 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="extract-content" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.724007 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="extract-content" Jan 23 07:09:33 crc kubenswrapper[5102]: E0123 07:09:33.724023 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="extract-content" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.724036 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="extract-content" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.724254 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9815288-967c-449d-a217-41bc899f9474" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.724273 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f31f0788-6705-452e-993e-c81bc3cd8b5e" containerName="registry-server" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.725676 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.739965 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.815005 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.815091 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.815135 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8h2fh\" (UniqueName: \"kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.916953 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.917025 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.917050 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8h2fh\" (UniqueName: \"kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.917649 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities\") pod \"redhat-marketplace-s5nvc\" (UID: 
\"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.917804 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:33 crc kubenswrapper[5102]: I0123 07:09:33.938429 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8h2fh\" (UniqueName: \"kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh\") pod \"redhat-marketplace-s5nvc\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:34 crc kubenswrapper[5102]: I0123 07:09:34.052052 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:34 crc kubenswrapper[5102]: I0123 07:09:34.307570 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:34 crc kubenswrapper[5102]: I0123 07:09:34.455593 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerStarted","Data":"305e07a5edb579b67e407dd3c8add8b78b1490f1bfa2b33f54218418c2ef153f"} Jan 23 07:09:35 crc kubenswrapper[5102]: I0123 07:09:35.468603 5102 generic.go:334] "Generic (PLEG): container finished" podID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerID="4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719" exitCode=0 Jan 23 07:09:35 crc kubenswrapper[5102]: I0123 07:09:35.468756 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerDied","Data":"4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719"} Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.044077 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/metallb-operator-controller-manager-6f968486d9-tlfn6" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.488123 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerStarted","Data":"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757"} Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.827250 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-845hr"] Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.830497 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.843391 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-certs-secret" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.843565 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-daemon-dockercfg-qdbz6" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.843916 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"frr-startup" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.848878 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb"] Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.849869 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.851853 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"frr-k8s-webhook-server-cert" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.870737 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb"] Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.944435 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/speaker-44g7f"] Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.945572 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-44g7f" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.951095 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-dockercfg-k4prm" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.951830 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"speaker-certs-secret" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.952088 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"metallb-system"/"metallb-excludel2" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.952120 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-memberlist" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.962612 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["metallb-system/controller-6968d8fdc4-6pgbp"] Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.964103 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.965845 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"controller-certs-secret" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.968976 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b931d587-70ae-4f83-bc54-507647f32f2a-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969050 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ghrg\" (UniqueName: \"kubernetes.io/projected/20940568-aa3c-4567-8a76-8cc4508bf6ff-kube-api-access-4ghrg\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969079 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-reloader\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969101 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-conf\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969116 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969133 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-sockets\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969160 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x7xdr\" (UniqueName: \"kubernetes.io/projected/b931d587-70ae-4f83-bc54-507647f32f2a-kube-api-access-x7xdr\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.969225 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"frr-startup\" 
(UniqueName: \"kubernetes.io/configmap/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-startup\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:36 crc kubenswrapper[5102]: I0123 07:09:36.999506 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-6pgbp"] Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071107 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x7xdr\" (UniqueName: \"kubernetes.io/projected/b931d587-70ae-4f83-bc54-507647f32f2a-kube-api-access-x7xdr\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071174 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071206 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8xjch\" (UniqueName: \"kubernetes.io/projected/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-kube-api-access-8xjch\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071224 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8qpkj\" (UniqueName: \"kubernetes.io/projected/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-kube-api-access-8qpkj\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071252 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metallb-excludel2\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071270 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-startup\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071288 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b931d587-70ae-4f83-bc54-507647f32f2a-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071323 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071343 5102 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-metrics-certs\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071361 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071399 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ghrg\" (UniqueName: \"kubernetes.io/projected/20940568-aa3c-4567-8a76-8cc4508bf6ff-kube-api-access-4ghrg\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071425 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-reloader\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071455 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-conf\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071478 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071499 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-cert\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.071522 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-sockets\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.072034 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-sockets\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-sockets\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.072608 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics\") pod \"frr-k8s-845hr\" (UID: 
\"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.073434 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-startup\" (UniqueName: \"kubernetes.io/configmap/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-startup\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.073678 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"reloader\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-reloader\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.073918 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"frr-conf\" (UniqueName: \"kubernetes.io/empty-dir/20940568-aa3c-4567-8a76-8cc4508bf6ff-frr-conf\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.073994 5102 secret.go:188] Couldn't get secret metallb-system/frr-k8s-certs-secret: secret "frr-k8s-certs-secret" not found Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.074047 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs podName:20940568-aa3c-4567-8a76-8cc4508bf6ff nodeName:}" failed. No retries permitted until 2026-01-23 07:09:37.574031212 +0000 UTC m=+928.394380187 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs") pod "frr-k8s-845hr" (UID: "20940568-aa3c-4567-8a76-8cc4508bf6ff") : secret "frr-k8s-certs-secret" not found Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.089106 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/b931d587-70ae-4f83-bc54-507647f32f2a-cert\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.095125 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x7xdr\" (UniqueName: \"kubernetes.io/projected/b931d587-70ae-4f83-bc54-507647f32f2a-kube-api-access-x7xdr\") pod \"frr-k8s-webhook-server-7df86c4f6c-txjhb\" (UID: \"b931d587-70ae-4f83-bc54-507647f32f2a\") " pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.095336 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ghrg\" (UniqueName: \"kubernetes.io/projected/20940568-aa3c-4567-8a76-8cc4508bf6ff-kube-api-access-4ghrg\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.164101 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172625 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8xjch\" (UniqueName: \"kubernetes.io/projected/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-kube-api-access-8xjch\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172684 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8qpkj\" (UniqueName: \"kubernetes.io/projected/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-kube-api-access-8qpkj\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172716 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metallb-excludel2\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172765 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172784 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-metrics-certs\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172800 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.172851 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-cert\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.174248 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metallb-excludel2\" (UniqueName: \"kubernetes.io/configmap/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metallb-excludel2\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.174346 5102 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.174409 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist podName:1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0 nodeName:}" failed. 
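[annotation] Note the durationBeforeRetry values: the first failures above schedule a retry after 500ms, and the same memberlist volume is retried after 1s a little further down, i.e. the per-operation delay doubles on each failure. A standalone toy loop showing that progression; the attempt count and the cap are illustrative values for the sketch, not the kubelet's actual constants, and the kubelet schedules retries rather than sleeping:

```go
package main

import (
	"fmt"
	"time"
)

// Mirrors the durationBeforeRetry progression visible in the log
// (500ms, then 1s): exponential backoff that doubles per failure.
func main() {
	const (
		initial  = 500 * time.Millisecond
		maxDelay = 30 * time.Second // assumption for the sketch
	)
	delay := initial
	for attempt := 1; attempt <= 4; attempt++ {
		fmt.Printf("attempt %d failed; next retry in %v\n", attempt, delay)
		time.Sleep(delay) // stand-in for the kubelet's scheduled retry
		delay *= 2
		if delay > maxDelay {
			delay = maxDelay
		}
	}
}
```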
No retries permitted until 2026-01-23 07:09:37.674392162 +0000 UTC m=+928.494741137 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist") pod "speaker-44g7f" (UID: "1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0") : secret "metallb-memberlist" not found Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.174758 5102 secret.go:188] Couldn't get secret metallb-system/speaker-certs-secret: secret "speaker-certs-secret" not found Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.175700 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs podName:1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0 nodeName:}" failed. No retries permitted until 2026-01-23 07:09:37.675660479 +0000 UTC m=+928.496009604 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs") pod "speaker-44g7f" (UID: "1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0") : secret "speaker-certs-secret" not found Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.179458 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-metrics-certs\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.183352 5102 reflector.go:368] Caches populated for *v1.Secret from object-"metallb-system"/"metallb-webhook-cert" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.196052 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8xjch\" (UniqueName: \"kubernetes.io/projected/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-kube-api-access-8xjch\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.197005 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8qpkj\" (UniqueName: \"kubernetes.io/projected/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-kube-api-access-8qpkj\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.198671 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95-cert\") pod \"controller-6968d8fdc4-6pgbp\" (UID: \"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95\") " pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.278023 5102 util.go:30] "No sandbox for pod can be found. 
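[annotation] The "metallb-memberlist" secret the speaker pod is waiting on is created moments later by whatever controller owns it, as the successful mount at 07:09:38.734 below shows, so no manual action was needed here. Purely for illustration, this is roughly what creating such a secret looks like with client-go; the key name "secretkey" and the 32-byte random payload are assumptions, not MetalLB's confirmed layout:

```go
package main

import (
	"context"
	"crypto/rand"
	"encoding/base64"
	"log"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Random key material, as a memberlist secret would carry.
	raw := make([]byte, 32)
	if _, err := rand.Read(raw); err != nil {
		log.Fatal(err)
	}

	sec := &corev1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "metallb-memberlist",
			Namespace: "metallb-system",
		},
		// "secretkey" is an assumed key name for this sketch.
		Data: map[string][]byte{
			"secretkey": []byte(base64.StdEncoding.EncodeToString(raw)),
		},
	}
	if _, err := cs.CoreV1().Secrets("metallb-system").Create(
		context.TODO(), sec, metav1.CreateOptions{}); err != nil {
		log.Fatal(err)
	}
	log.Println("secret created; pending mounts can now succeed")
}
```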
Need to start a new one" pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.448353 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb"] Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.512495 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" event={"ID":"b931d587-70ae-4f83-bc54-507647f32f2a","Type":"ContainerStarted","Data":"9fb070b99ea008ead0bdd0e8b3b66241a8edc6456a722c114f1d7f28a9e70bc5"} Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.516264 5102 generic.go:334] "Generic (PLEG): container finished" podID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerID="eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757" exitCode=0 Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.516324 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerDied","Data":"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757"} Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.535428 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["metallb-system/controller-6968d8fdc4-6pgbp"] Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.602793 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.608623 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/20940568-aa3c-4567-8a76-8cc4508bf6ff-metrics-certs\") pod \"frr-k8s-845hr\" (UID: \"20940568-aa3c-4567-8a76-8cc4508bf6ff\") " pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.704678 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.705378 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.704909 5102 secret.go:188] Couldn't get secret metallb-system/metallb-memberlist: secret "metallb-memberlist" not found Jan 23 07:09:37 crc kubenswrapper[5102]: E0123 07:09:37.705753 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist podName:1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0 nodeName:}" failed. No retries permitted until 2026-01-23 07:09:38.705709773 +0000 UTC m=+929.526058748 (durationBeforeRetry 1s). 
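[annotation] The pod_startup_latency_tracker entries just below make the SLI arithmetic visible: podStartSLOduration is podStartE2EDuration minus the time spent pulling images. For redhat-marketplace-s5nvc the pull window is lastFinishedPulling minus firstStartedPulling (m=+928.732882029 minus m=+926.292175451, about 2.44s), and 5.579334188s minus that pull time gives the logged 3.13862761s; for the controller pod no pull happened (the 0001-01-01 sentinel timestamps), so SLO and E2E durations are both 2.553633002s. A tiny Go check of the numbers, taken directly from the entries below:

```go
package main

import "fmt"

// Reproduces the startup-latency arithmetic from the log:
// podStartSLOduration = podStartE2EDuration - image-pull time.
// The m=+ offsets are the kubelet's monotonic-clock readings.
func main() {
	const (
		e2e       = 5.579334188   // podStartE2EDuration, seconds
		firstPull = 926.292175451 // firstStartedPulling, m=+ offset
		lastPull  = 928.732882029 // lastFinishedPulling, m=+ offset
	)
	pull := lastPull - firstPull
	fmt.Printf("pull took %.9fs\n", pull)        // 2.440706578s
	fmt.Printf("SLO duration %.8fs\n", e2e-pull) // 3.13862761s, as logged
}
```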
Error: MountVolume.SetUp failed for volume "memberlist" (UniqueName: "kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist") pod "speaker-44g7f" (UID: "1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0") : secret "metallb-memberlist" not found Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.713128 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-metrics-certs\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:37 crc kubenswrapper[5102]: I0123 07:09:37.758160 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.529033 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-6pgbp" event={"ID":"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95","Type":"ContainerStarted","Data":"d228840dabbc864efd6adb6fcf839ced7f5d012d4ee57abd13f72dd7f669d157"} Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.530708 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-6pgbp" event={"ID":"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95","Type":"ContainerStarted","Data":"3d0ab72ba97a422b5226abdbeb3b916263d2878e29d3f070d37becc6d7b5632e"} Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.530725 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/controller-6968d8fdc4-6pgbp" event={"ID":"3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95","Type":"ContainerStarted","Data":"8f05ff9e68dcdf1540167687c97fb9cb513caa681e867f063f2e573bf5c453bd"} Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.530737 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"8c82df8df6766c16265761b9f8adde1418b91a061ad2a501166501a28a016e7e"} Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.530759 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.532322 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerStarted","Data":"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d"} Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.553656 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/controller-6968d8fdc4-6pgbp" podStartSLOduration=2.5536330019999998 podStartE2EDuration="2.553633002s" podCreationTimestamp="2026-01-23 07:09:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:09:38.550092708 +0000 UTC m=+929.370441683" watchObservedRunningTime="2026-01-23 07:09:38.553633002 +0000 UTC m=+929.373981967" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.579359 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-s5nvc" podStartSLOduration=3.13862761 podStartE2EDuration="5.579334188s" podCreationTimestamp="2026-01-23 07:09:33 +0000 UTC" firstStartedPulling="2026-01-23 07:09:35.471826436 +0000 UTC m=+926.292175451" lastFinishedPulling="2026-01-23 07:09:37.912533054 
+0000 UTC m=+928.732882029" observedRunningTime="2026-01-23 07:09:38.576483124 +0000 UTC m=+929.396832119" watchObservedRunningTime="2026-01-23 07:09:38.579334188 +0000 UTC m=+929.399683173" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.725728 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.734073 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memberlist\" (UniqueName: \"kubernetes.io/secret/1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0-memberlist\") pod \"speaker-44g7f\" (UID: \"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0\") " pod="metallb-system/speaker-44g7f" Jan 23 07:09:38 crc kubenswrapper[5102]: I0123 07:09:38.760733 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="metallb-system/speaker-44g7f" Jan 23 07:09:39 crc kubenswrapper[5102]: I0123 07:09:39.549248 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-44g7f" event={"ID":"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0","Type":"ContainerStarted","Data":"b975fa98fbe2ffff97493e07c860377ce5b29c1107f6fd774e7844bd327ea2b3"} Jan 23 07:09:39 crc kubenswrapper[5102]: I0123 07:09:39.549796 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-44g7f" event={"ID":"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0","Type":"ContainerStarted","Data":"932334dcbdcfc4be096b45bab83e1ba30b6e3ed995260aa69e2b0545873b58da"} Jan 23 07:09:40 crc kubenswrapper[5102]: I0123 07:09:40.565452 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/speaker-44g7f" event={"ID":"1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0","Type":"ContainerStarted","Data":"74a8b9e69dda09100af3f383bc60248c6ed6189885bb4d85ce0ae2b9f36437e5"} Jan 23 07:09:40 crc kubenswrapper[5102]: I0123 07:09:40.565714 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/speaker-44g7f" Jan 23 07:09:40 crc kubenswrapper[5102]: I0123 07:09:40.589913 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/speaker-44g7f" podStartSLOduration=4.58988999 podStartE2EDuration="4.58988999s" podCreationTimestamp="2026-01-23 07:09:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:09:40.586434528 +0000 UTC m=+931.406783503" watchObservedRunningTime="2026-01-23 07:09:40.58988999 +0000 UTC m=+931.410238965" Jan 23 07:09:44 crc kubenswrapper[5102]: I0123 07:09:44.052305 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:44 crc kubenswrapper[5102]: I0123 07:09:44.052982 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:44 crc kubenswrapper[5102]: I0123 07:09:44.122482 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:44 crc kubenswrapper[5102]: I0123 07:09:44.718171 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:44 crc kubenswrapper[5102]: I0123 07:09:44.771614 5102 kubelet.go:2437] "SyncLoop 
DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:46 crc kubenswrapper[5102]: I0123 07:09:46.676976 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-s5nvc" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="registry-server" containerID="cri-o://519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d" gracePeriod=2 Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.149175 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.288820 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/controller-6968d8fdc4-6pgbp" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.305580 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content\") pod \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.305707 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities\") pod \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.305874 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8h2fh\" (UniqueName: \"kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh\") pod \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\" (UID: \"4600a5eb-d7aa-41e7-a272-13a448d4f5cd\") " Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.307365 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities" (OuterVolumeSpecName: "utilities") pod "4600a5eb-d7aa-41e7-a272-13a448d4f5cd" (UID: "4600a5eb-d7aa-41e7-a272-13a448d4f5cd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.317519 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh" (OuterVolumeSpecName: "kube-api-access-8h2fh") pod "4600a5eb-d7aa-41e7-a272-13a448d4f5cd" (UID: "4600a5eb-d7aa-41e7-a272-13a448d4f5cd"). InnerVolumeSpecName "kube-api-access-8h2fh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.335870 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4600a5eb-d7aa-41e7-a272-13a448d4f5cd" (UID: "4600a5eb-d7aa-41e7-a272-13a448d4f5cd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.407962 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.408095 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.408253 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8h2fh\" (UniqueName: \"kubernetes.io/projected/4600a5eb-d7aa-41e7-a272-13a448d4f5cd-kube-api-access-8h2fh\") on node \"crc\" DevicePath \"\"" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.686936 5102 generic.go:334] "Generic (PLEG): container finished" podID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerID="519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d" exitCode=0 Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.687640 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerDied","Data":"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d"} Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.687730 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-s5nvc" event={"ID":"4600a5eb-d7aa-41e7-a272-13a448d4f5cd","Type":"ContainerDied","Data":"305e07a5edb579b67e407dd3c8add8b78b1490f1bfa2b33f54218418c2ef153f"} Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.687767 5102 scope.go:117] "RemoveContainer" containerID="519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.688773 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-s5nvc" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.690101 5102 generic.go:334] "Generic (PLEG): container finished" podID="20940568-aa3c-4567-8a76-8cc4508bf6ff" containerID="65edd4d2d0112e2e18e2069e0dd3df5fa9948d7d96d8cf01683c4c3112015ed9" exitCode=0 Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.690561 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerDied","Data":"65edd4d2d0112e2e18e2069e0dd3df5fa9948d7d96d8cf01683c4c3112015ed9"} Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.694130 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" event={"ID":"b931d587-70ae-4f83-bc54-507647f32f2a","Type":"ContainerStarted","Data":"72fd9eedd002e6d796c9d1489178387274db7c95418dad2fe5d2479e33d8b211"} Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.694392 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.717884 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.723291 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-s5nvc"] Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.736200 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" podStartSLOduration=2.282355836 podStartE2EDuration="11.736176542s" podCreationTimestamp="2026-01-23 07:09:36 +0000 UTC" firstStartedPulling="2026-01-23 07:09:37.45900803 +0000 UTC m=+928.279357005" lastFinishedPulling="2026-01-23 07:09:46.912828746 +0000 UTC m=+937.733177711" observedRunningTime="2026-01-23 07:09:47.73327346 +0000 UTC m=+938.553622435" watchObservedRunningTime="2026-01-23 07:09:47.736176542 +0000 UTC m=+938.556525517" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.746066 5102 scope.go:117] "RemoveContainer" containerID="eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.802871 5102 scope.go:117] "RemoveContainer" containerID="4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.869503 5102 scope.go:117] "RemoveContainer" containerID="519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d" Jan 23 07:09:47 crc kubenswrapper[5102]: E0123 07:09:47.870108 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d\": container with ID starting with 519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d not found: ID does not exist" containerID="519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.870216 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d"} err="failed to get container status \"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d\": rpc error: code = NotFound desc = could not find container 
\"519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d\": container with ID starting with 519bae414b7151072f802304a0cdf15dff695659f7085e803708a21b730a4d0d not found: ID does not exist" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.870275 5102 scope.go:117] "RemoveContainer" containerID="eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757" Jan 23 07:09:47 crc kubenswrapper[5102]: E0123 07:09:47.870642 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757\": container with ID starting with eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757 not found: ID does not exist" containerID="eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.870672 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757"} err="failed to get container status \"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757\": rpc error: code = NotFound desc = could not find container \"eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757\": container with ID starting with eafce5d404fd9886cf9b910f76fbae31defdae3066d54921da5d9e39418d8757 not found: ID does not exist" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.870695 5102 scope.go:117] "RemoveContainer" containerID="4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719" Jan 23 07:09:47 crc kubenswrapper[5102]: E0123 07:09:47.870926 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719\": container with ID starting with 4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719 not found: ID does not exist" containerID="4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719" Jan 23 07:09:47 crc kubenswrapper[5102]: I0123 07:09:47.870953 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719"} err="failed to get container status \"4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719\": rpc error: code = NotFound desc = could not find container \"4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719\": container with ID starting with 4aac24825345d0ad355f1b1f4d37a618b84c6e3eaa457a949da3925856793719 not found: ID does not exist" Jan 23 07:09:48 crc kubenswrapper[5102]: I0123 07:09:48.707112 5102 generic.go:334] "Generic (PLEG): container finished" podID="20940568-aa3c-4567-8a76-8cc4508bf6ff" containerID="da02ba765ebbf75208f6ee6058fc4d42ca3bb600af762f9d69dde4e5f3a4f9c6" exitCode=0 Jan 23 07:09:48 crc kubenswrapper[5102]: I0123 07:09:48.708069 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerDied","Data":"da02ba765ebbf75208f6ee6058fc4d42ca3bb600af762f9d69dde4e5f3a4f9c6"} Jan 23 07:09:49 crc kubenswrapper[5102]: I0123 07:09:49.623283 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" path="/var/lib/kubelet/pods/4600a5eb-d7aa-41e7-a272-13a448d4f5cd/volumes" Jan 23 07:09:49 crc kubenswrapper[5102]: I0123 07:09:49.721723 5102 
generic.go:334] "Generic (PLEG): container finished" podID="20940568-aa3c-4567-8a76-8cc4508bf6ff" containerID="2fd23e377b641f205e79638984a03d12e057cf3c505ee6c403b15e3775b50a89" exitCode=0 Jan 23 07:09:49 crc kubenswrapper[5102]: I0123 07:09:49.721798 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerDied","Data":"2fd23e377b641f205e79638984a03d12e057cf3c505ee6c403b15e3775b50a89"} Jan 23 07:09:50 crc kubenswrapper[5102]: I0123 07:09:50.742268 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"8adb3f6245655f220a5f0ae67200ca79189ba2a75d581e9d62f726a96715b785"} Jan 23 07:09:50 crc kubenswrapper[5102]: I0123 07:09:50.742364 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"53d531ef902d66a0b3184d674eb8cf7f710c94c5a1e62821240299318f6ffab2"} Jan 23 07:09:50 crc kubenswrapper[5102]: I0123 07:09:50.742380 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"0d3d521781a54dfa252680f98c29a11d43aab89f3aebe0f55c925eca5ca7f15f"} Jan 23 07:09:50 crc kubenswrapper[5102]: I0123 07:09:50.742394 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"25ae6dc96de46c7b637db7d16b64db3db42bed3dfb50524550ae870c942d8d8c"} Jan 23 07:09:50 crc kubenswrapper[5102]: I0123 07:09:50.742425 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"a4f0dce750877b3c1d85d80c18a903b93fb0cbd1a49d3696f5ff20841ba31a95"} Jan 23 07:09:51 crc kubenswrapper[5102]: I0123 07:09:51.754683 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="metallb-system/frr-k8s-845hr" event={"ID":"20940568-aa3c-4567-8a76-8cc4508bf6ff","Type":"ContainerStarted","Data":"2e083d4f4fbafc5e23210cc1d4b83c1b554103cdadfa8ba18c157797072ddd1e"} Jan 23 07:09:51 crc kubenswrapper[5102]: I0123 07:09:51.755445 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:51 crc kubenswrapper[5102]: I0123 07:09:51.795360 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="metallb-system/frr-k8s-845hr" podStartSLOduration=6.756963669 podStartE2EDuration="15.795328652s" podCreationTimestamp="2026-01-23 07:09:36 +0000 UTC" firstStartedPulling="2026-01-23 07:09:37.893733711 +0000 UTC m=+928.714082696" lastFinishedPulling="2026-01-23 07:09:46.932098704 +0000 UTC m=+937.752447679" observedRunningTime="2026-01-23 07:09:51.784210712 +0000 UTC m=+942.604559717" watchObservedRunningTime="2026-01-23 07:09:51.795328652 +0000 UTC m=+942.615677647" Jan 23 07:09:52 crc kubenswrapper[5102]: I0123 07:09:52.759562 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:52 crc kubenswrapper[5102]: I0123 07:09:52.832498 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="metallb-system/frr-k8s-845hr" Jan 23 07:09:57 crc kubenswrapper[5102]: I0123 07:09:57.174186 5102 kubelet.go:2542] "SyncLoop (probe)" 
probe="readiness" status="ready" pod="metallb-system/frr-k8s-webhook-server-7df86c4f6c-txjhb" Jan 23 07:09:58 crc kubenswrapper[5102]: I0123 07:09:58.765885 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/speaker-44g7f" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.419060 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9"] Jan 23 07:10:00 crc kubenswrapper[5102]: E0123 07:10:00.419388 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="extract-utilities" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.419406 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="extract-utilities" Jan 23 07:10:00 crc kubenswrapper[5102]: E0123 07:10:00.419424 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="registry-server" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.419432 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="registry-server" Jan 23 07:10:00 crc kubenswrapper[5102]: E0123 07:10:00.419441 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="extract-content" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.419451 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="extract-content" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.419612 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4600a5eb-d7aa-41e7-a272-13a448d4f5cd" containerName="registry-server" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.420648 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.424328 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.445629 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9"] Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.534819 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.534878 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.534912 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w8mmr\" (UniqueName: \"kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.636658 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.636715 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.636757 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w8mmr\" (UniqueName: \"kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.637803 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: 
\"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.638106 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.659159 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w8mmr\" (UniqueName: \"kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.739228 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:00 crc kubenswrapper[5102]: I0123 07:10:00.988876 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9"] Jan 23 07:10:01 crc kubenswrapper[5102]: I0123 07:10:01.828427 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" event={"ID":"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f","Type":"ContainerStarted","Data":"b5113443a501cce73622db77087af2acffc4c4ad54400e0e711ed4c60af3e127"} Jan 23 07:10:02 crc kubenswrapper[5102]: I0123 07:10:02.838323 5102 generic.go:334] "Generic (PLEG): container finished" podID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerID="a30846357141542ff59b565ba9fcf0df14e123d6333fee648031f88de77633e7" exitCode=0 Jan 23 07:10:02 crc kubenswrapper[5102]: I0123 07:10:02.838690 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" event={"ID":"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f","Type":"ContainerDied","Data":"a30846357141542ff59b565ba9fcf0df14e123d6333fee648031f88de77633e7"} Jan 23 07:10:06 crc kubenswrapper[5102]: I0123 07:10:06.874256 5102 generic.go:334] "Generic (PLEG): container finished" podID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerID="42b041ff9ef218e3e56111e529c2499284e1cd32aa31418bc067bfe4086058f8" exitCode=0 Jan 23 07:10:06 crc kubenswrapper[5102]: I0123 07:10:06.874310 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" event={"ID":"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f","Type":"ContainerDied","Data":"42b041ff9ef218e3e56111e529c2499284e1cd32aa31418bc067bfe4086058f8"} Jan 23 07:10:07 crc kubenswrapper[5102]: I0123 07:10:07.767177 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="metallb-system/frr-k8s-845hr" Jan 23 07:10:07 crc kubenswrapper[5102]: I0123 07:10:07.889701 5102 generic.go:334] "Generic (PLEG): container finished" 
podID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerID="7fd8dc63ffb48f0b15b820f8144e086608098ad65b968783844a2654837ac463" exitCode=0 Jan 23 07:10:07 crc kubenswrapper[5102]: I0123 07:10:07.889781 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" event={"ID":"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f","Type":"ContainerDied","Data":"7fd8dc63ffb48f0b15b820f8144e086608098ad65b968783844a2654837ac463"} Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.199776 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.393672 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w8mmr\" (UniqueName: \"kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr\") pod \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.393961 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle\") pod \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.394161 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util\") pod \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\" (UID: \"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f\") " Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.395104 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle" (OuterVolumeSpecName: "bundle") pod "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" (UID: "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.406889 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr" (OuterVolumeSpecName: "kube-api-access-w8mmr") pod "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" (UID: "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f"). InnerVolumeSpecName "kube-api-access-w8mmr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.416797 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util" (OuterVolumeSpecName: "util") pod "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" (UID: "5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.496700 5102 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-util\") on node \"crc\" DevicePath \"\"" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.496762 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w8mmr\" (UniqueName: \"kubernetes.io/projected/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-kube-api-access-w8mmr\") on node \"crc\" DevicePath \"\"" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.496794 5102 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.912419 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" event={"ID":"5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f","Type":"ContainerDied","Data":"b5113443a501cce73622db77087af2acffc4c4ad54400e0e711ed4c60af3e127"} Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.912515 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b5113443a501cce73622db77087af2acffc4c4ad54400e0e711ed4c60af3e127" Jan 23 07:10:09 crc kubenswrapper[5102]: I0123 07:10:09.912516 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.077433 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp"] Jan 23 07:10:13 crc kubenswrapper[5102]: E0123 07:10:13.078364 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="util" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.078384 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="util" Jan 23 07:10:13 crc kubenswrapper[5102]: E0123 07:10:13.078404 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="pull" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.078412 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="pull" Jan 23 07:10:13 crc kubenswrapper[5102]: E0123 07:10:13.078442 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="extract" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.078452 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="extract" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.078622 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f" containerName="extract" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.079254 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.081892 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.081911 5102 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-46z7d" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.082165 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.092982 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp"] Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.255104 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8ecee4c0-0bd6-443f-9cae-509d40fac70f-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.255208 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k9fjq\" (UniqueName: \"kubernetes.io/projected/8ecee4c0-0bd6-443f-9cae-509d40fac70f-kube-api-access-k9fjq\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.357600 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8ecee4c0-0bd6-443f-9cae-509d40fac70f-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.357730 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k9fjq\" (UniqueName: \"kubernetes.io/projected/8ecee4c0-0bd6-443f-9cae-509d40fac70f-kube-api-access-k9fjq\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.358503 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/8ecee4c0-0bd6-443f-9cae-509d40fac70f-tmp\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.388601 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k9fjq\" (UniqueName: \"kubernetes.io/projected/8ecee4c0-0bd6-443f-9cae-509d40fac70f-kube-api-access-k9fjq\") pod \"cert-manager-operator-controller-manager-64cf6dff88-bf9mp\" (UID: \"8ecee4c0-0bd6-443f-9cae-509d40fac70f\") " 
pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.405325 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.781915 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp"] Jan 23 07:10:13 crc kubenswrapper[5102]: W0123 07:10:13.794471 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8ecee4c0_0bd6_443f_9cae_509d40fac70f.slice/crio-8f53d4f1607467dc5373eac310b1d9d0d79558d67b5f9115a2be86e9aac83a07 WatchSource:0}: Error finding container 8f53d4f1607467dc5373eac310b1d9d0d79558d67b5f9115a2be86e9aac83a07: Status 404 returned error can't find the container with id 8f53d4f1607467dc5373eac310b1d9d0d79558d67b5f9115a2be86e9aac83a07 Jan 23 07:10:13 crc kubenswrapper[5102]: I0123 07:10:13.945357 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" event={"ID":"8ecee4c0-0bd6-443f-9cae-509d40fac70f","Type":"ContainerStarted","Data":"8f53d4f1607467dc5373eac310b1d9d0d79558d67b5f9115a2be86e9aac83a07"} Jan 23 07:10:23 crc kubenswrapper[5102]: I0123 07:10:23.035329 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" event={"ID":"8ecee4c0-0bd6-443f-9cae-509d40fac70f","Type":"ContainerStarted","Data":"a10d3b73e2299c30149c61df9b17c512caab19ff144cbf523fe2cb961e2c48bb"} Jan 23 07:10:23 crc kubenswrapper[5102]: I0123 07:10:23.057263 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-64cf6dff88-bf9mp" podStartSLOduration=1.260256503 podStartE2EDuration="10.057232418s" podCreationTimestamp="2026-01-23 07:10:13 +0000 UTC" firstStartedPulling="2026-01-23 07:10:13.798179637 +0000 UTC m=+964.618528612" lastFinishedPulling="2026-01-23 07:10:22.595155552 +0000 UTC m=+973.415504527" observedRunningTime="2026-01-23 07:10:23.056097503 +0000 UTC m=+973.876446488" watchObservedRunningTime="2026-01-23 07:10:23.057232418 +0000 UTC m=+973.877581383" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.459516 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-4fll4"] Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.463573 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.471704 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.472347 5102 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-6klbg" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.472625 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.472698 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-4fll4"] Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.563227 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.563354 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mzj8m\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-kube-api-access-mzj8m\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.664955 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mzj8m\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-kube-api-access-mzj8m\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.665133 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.695410 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.695718 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mzj8m\" (UniqueName: \"kubernetes.io/projected/fc523104-1eac-4a3d-bb14-e21cc2a63e10-kube-api-access-mzj8m\") pod \"cert-manager-cainjector-855d9ccff4-4fll4\" (UID: \"fc523104-1eac-4a3d-bb14-e21cc2a63e10\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:28 crc kubenswrapper[5102]: I0123 07:10:28.791457 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.034128 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-4fll4"] Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.079172 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" event={"ID":"fc523104-1eac-4a3d-bb14-e21cc2a63e10","Type":"ContainerStarted","Data":"7561f4adc47fc8eff7a882794208ecc1b51ed4c4d5939f78c3b73a254e1992ff"} Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.865475 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cspwp"] Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.867319 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.871339 5102 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-scmhr" Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.879817 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cspwp"] Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.987877 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:29 crc kubenswrapper[5102]: I0123 07:10:29.988678 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btwfw\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-kube-api-access-btwfw\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.089953 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btwfw\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-kube-api-access-btwfw\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.090032 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.113222 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.117787 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-btwfw\" (UniqueName: \"kubernetes.io/projected/010c2bf3-7da5-419e-8889-f81a5f7d8bfe-kube-api-access-btwfw\") pod \"cert-manager-webhook-f4fb5df64-cspwp\" (UID: \"010c2bf3-7da5-419e-8889-f81a5f7d8bfe\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.198734 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:30 crc kubenswrapper[5102]: I0123 07:10:30.514783 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-cspwp"] Jan 23 07:10:30 crc kubenswrapper[5102]: W0123 07:10:30.518963 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod010c2bf3_7da5_419e_8889_f81a5f7d8bfe.slice/crio-9b68c92858f0c2a0b53f5eeb52a6f487464a8739ebaa20ff3a3ae33eeea92fab WatchSource:0}: Error finding container 9b68c92858f0c2a0b53f5eeb52a6f487464a8739ebaa20ff3a3ae33eeea92fab: Status 404 returned error can't find the container with id 9b68c92858f0c2a0b53f5eeb52a6f487464a8739ebaa20ff3a3ae33eeea92fab Jan 23 07:10:31 crc kubenswrapper[5102]: I0123 07:10:31.095031 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" event={"ID":"010c2bf3-7da5-419e-8889-f81a5f7d8bfe","Type":"ContainerStarted","Data":"9b68c92858f0c2a0b53f5eeb52a6f487464a8739ebaa20ff3a3ae33eeea92fab"} Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.243928 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-n44p9"] Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.245915 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.249236 5102 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-qcp6b" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.258935 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-n44p9"] Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.295977 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x8qs\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-kube-api-access-6x8qs\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.296034 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-bound-sa-token\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.397766 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x8qs\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-kube-api-access-6x8qs\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.397832 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-bound-sa-token\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.419345 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-bound-sa-token\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.421843 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x8qs\" (UniqueName: \"kubernetes.io/projected/16bbdaaa-0566-4514-bfa1-186ffe09607f-kube-api-access-6x8qs\") pod \"cert-manager-86cb77c54b-n44p9\" (UID: \"16bbdaaa-0566-4514-bfa1-186ffe09607f\") " pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:35 crc kubenswrapper[5102]: I0123 07:10:35.579400 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-n44p9" Jan 23 07:10:39 crc kubenswrapper[5102]: W0123 07:10:39.556061 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod16bbdaaa_0566_4514_bfa1_186ffe09607f.slice/crio-026143b48dd971cd048f580101e2fcdf1a889590b2627f558f661bb481585419 WatchSource:0}: Error finding container 026143b48dd971cd048f580101e2fcdf1a889590b2627f558f661bb481585419: Status 404 returned error can't find the container with id 026143b48dd971cd048f580101e2fcdf1a889590b2627f558f661bb481585419 Jan 23 07:10:39 crc kubenswrapper[5102]: I0123 07:10:39.556464 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-n44p9"] Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.168991 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" event={"ID":"fc523104-1eac-4a3d-bb14-e21cc2a63e10","Type":"ContainerStarted","Data":"e3d5f5ae4e06215eac81dd9586bd229585da1769e8db341eda45f3d1fadb4cf1"} Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.170673 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" event={"ID":"010c2bf3-7da5-419e-8889-f81a5f7d8bfe","Type":"ContainerStarted","Data":"b8867868626cead222aa732a755deec024d42132e3f4f5af3fda7030103ce288"} Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.170805 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.172244 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-n44p9" event={"ID":"16bbdaaa-0566-4514-bfa1-186ffe09607f","Type":"ContainerStarted","Data":"655c4aa23e0097630041116c23d7362edc27e25a2b0632e9b1fe8fdbb8579b0c"} Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.172282 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-n44p9" event={"ID":"16bbdaaa-0566-4514-bfa1-186ffe09607f","Type":"ContainerStarted","Data":"026143b48dd971cd048f580101e2fcdf1a889590b2627f558f661bb481585419"} Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.190008 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-4fll4" podStartSLOduration=2.049355811 podStartE2EDuration="12.189983125s" podCreationTimestamp="2026-01-23 07:10:28 +0000 UTC" firstStartedPulling="2026-01-23 07:10:29.058802466 +0000 UTC m=+979.879151441" lastFinishedPulling="2026-01-23 07:10:39.19942978 +0000 UTC m=+990.019778755" observedRunningTime="2026-01-23 07:10:40.18408144 +0000 UTC m=+991.004430415" watchObservedRunningTime="2026-01-23 07:10:40.189983125 +0000 UTC m=+991.010332100" Jan 23 07:10:40 crc kubenswrapper[5102]: I0123 07:10:40.211938 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" podStartSLOduration=2.515548482 podStartE2EDuration="11.211911126s" podCreationTimestamp="2026-01-23 07:10:29 +0000 UTC" firstStartedPulling="2026-01-23 07:10:30.522845068 +0000 UTC m=+981.343194043" lastFinishedPulling="2026-01-23 07:10:39.219207712 +0000 UTC m=+990.039556687" observedRunningTime="2026-01-23 07:10:40.207861878 +0000 UTC m=+991.028210853" watchObservedRunningTime="2026-01-23 07:10:40.211911126 +0000 UTC m=+991.032260101" Jan 23 07:10:45 crc 
kubenswrapper[5102]: I0123 07:10:45.201824 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-cspwp" Jan 23 07:10:45 crc kubenswrapper[5102]: I0123 07:10:45.227380 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-n44p9" podStartSLOduration=10.227349135 podStartE2EDuration="10.227349135s" podCreationTimestamp="2026-01-23 07:10:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:10:40.235016953 +0000 UTC m=+991.055365928" watchObservedRunningTime="2026-01-23 07:10:45.227349135 +0000 UTC m=+996.047698110" Jan 23 07:10:46 crc kubenswrapper[5102]: I0123 07:10:46.769173 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:10:46 crc kubenswrapper[5102]: I0123 07:10:46.769806 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.075805 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.079641 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.087658 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-index-dockercfg-lzk59" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.088776 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"kube-root-ca.crt" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.089388 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack-operators"/"openshift-service-ca.crt" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.100872 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.222815 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vmkt7\" (UniqueName: \"kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7\") pod \"openstack-operator-index-l6plb\" (UID: \"2769e9f5-b41b-41cc-b13b-949ac783b36c\") " pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.324521 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vmkt7\" (UniqueName: \"kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7\") pod \"openstack-operator-index-l6plb\" (UID: \"2769e9f5-b41b-41cc-b13b-949ac783b36c\") " pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.346690 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vmkt7\" (UniqueName: \"kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7\") pod \"openstack-operator-index-l6plb\" (UID: \"2769e9f5-b41b-41cc-b13b-949ac783b36c\") " pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.403278 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:49 crc kubenswrapper[5102]: I0123 07:10:49.846050 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:49 crc kubenswrapper[5102]: W0123 07:10:49.850366 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2769e9f5_b41b_41cc_b13b_949ac783b36c.slice/crio-c8a0e9f0662e3bc27093a94d69151fc7e9162dce698cdc1eb469ca887afa38e4 WatchSource:0}: Error finding container c8a0e9f0662e3bc27093a94d69151fc7e9162dce698cdc1eb469ca887afa38e4: Status 404 returned error can't find the container with id c8a0e9f0662e3bc27093a94d69151fc7e9162dce698cdc1eb469ca887afa38e4 Jan 23 07:10:50 crc kubenswrapper[5102]: I0123 07:10:50.246650 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-l6plb" event={"ID":"2769e9f5-b41b-41cc-b13b-949ac783b36c","Type":"ContainerStarted","Data":"c8a0e9f0662e3bc27093a94d69151fc7e9162dce698cdc1eb469ca887afa38e4"} Jan 23 07:10:51 crc kubenswrapper[5102]: I0123 07:10:51.257443 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-l6plb" event={"ID":"2769e9f5-b41b-41cc-b13b-949ac783b36c","Type":"ContainerStarted","Data":"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f"} Jan 23 07:10:51 crc kubenswrapper[5102]: I0123 07:10:51.282200 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-l6plb" podStartSLOduration=1.604279346 podStartE2EDuration="2.282173588s" podCreationTimestamp="2026-01-23 07:10:49 +0000 UTC" firstStartedPulling="2026-01-23 07:10:49.853061075 +0000 UTC m=+1000.673410050" lastFinishedPulling="2026-01-23 07:10:50.530955307 +0000 UTC m=+1001.351304292" observedRunningTime="2026-01-23 07:10:51.277461669 +0000 UTC m=+1002.097810644" watchObservedRunningTime="2026-01-23 07:10:51.282173588 +0000 UTC m=+1002.102522563" Jan 23 07:10:52 crc kubenswrapper[5102]: I0123 07:10:52.451156 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.054389 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-index-svxnq"] Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.055149 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.067924 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-svxnq"] Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.125354 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-72qmv\" (UniqueName: \"kubernetes.io/projected/faa339ad-749a-4eca-a33b-65d14522c3c1-kube-api-access-72qmv\") pod \"openstack-operator-index-svxnq\" (UID: \"faa339ad-749a-4eca-a33b-65d14522c3c1\") " pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.228733 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-72qmv\" (UniqueName: \"kubernetes.io/projected/faa339ad-749a-4eca-a33b-65d14522c3c1-kube-api-access-72qmv\") pod \"openstack-operator-index-svxnq\" (UID: \"faa339ad-749a-4eca-a33b-65d14522c3c1\") " pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.261492 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-72qmv\" (UniqueName: \"kubernetes.io/projected/faa339ad-749a-4eca-a33b-65d14522c3c1-kube-api-access-72qmv\") pod \"openstack-operator-index-svxnq\" (UID: \"faa339ad-749a-4eca-a33b-65d14522c3c1\") " pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.272803 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack-operators/openstack-operator-index-l6plb" podUID="2769e9f5-b41b-41cc-b13b-949ac783b36c" containerName="registry-server" containerID="cri-o://a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f" gracePeriod=2 Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.382522 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.777691 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-index-svxnq"] Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.825261 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.855041 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vmkt7\" (UniqueName: \"kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7\") pod \"2769e9f5-b41b-41cc-b13b-949ac783b36c\" (UID: \"2769e9f5-b41b-41cc-b13b-949ac783b36c\") " Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.861452 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7" (OuterVolumeSpecName: "kube-api-access-vmkt7") pod "2769e9f5-b41b-41cc-b13b-949ac783b36c" (UID: "2769e9f5-b41b-41cc-b13b-949ac783b36c"). InnerVolumeSpecName "kube-api-access-vmkt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:10:53 crc kubenswrapper[5102]: I0123 07:10:53.956304 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vmkt7\" (UniqueName: \"kubernetes.io/projected/2769e9f5-b41b-41cc-b13b-949ac783b36c-kube-api-access-vmkt7\") on node \"crc\" DevicePath \"\"" Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.283532 5102 generic.go:334] "Generic (PLEG): container finished" podID="2769e9f5-b41b-41cc-b13b-949ac783b36c" containerID="a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f" exitCode=0 Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.284020 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-l6plb" event={"ID":"2769e9f5-b41b-41cc-b13b-949ac783b36c","Type":"ContainerDied","Data":"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f"} Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.284138 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-l6plb" event={"ID":"2769e9f5-b41b-41cc-b13b-949ac783b36c","Type":"ContainerDied","Data":"c8a0e9f0662e3bc27093a94d69151fc7e9162dce698cdc1eb469ca887afa38e4"} Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.284173 5102 scope.go:117] "RemoveContainer" containerID="a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f" Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.284378 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-index-l6plb" Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.287604 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-svxnq" event={"ID":"faa339ad-749a-4eca-a33b-65d14522c3c1","Type":"ContainerStarted","Data":"0e77176518054640dd7b9ed29b676979ab9b503b98d32cd5c4abb963b44c426c"} Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.327821 5102 scope.go:117] "RemoveContainer" containerID="a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f" Jan 23 07:10:54 crc kubenswrapper[5102]: E0123 07:10:54.328613 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f\": container with ID starting with a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f not found: ID does not exist" containerID="a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f" Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.328652 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f"} err="failed to get container status \"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f\": rpc error: code = NotFound desc = could not find container \"a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f\": container with ID starting with a2d95baef1d4bf6907cf8f6aec8a69d47ed5bb043663c2ce8d2cafc2e5cc4f1f not found: ID does not exist" Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.357787 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:54 crc kubenswrapper[5102]: I0123 07:10:54.364988 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack-operators/openstack-operator-index-l6plb"] Jan 23 07:10:55 
crc kubenswrapper[5102]: I0123 07:10:55.297642 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-index-svxnq" event={"ID":"faa339ad-749a-4eca-a33b-65d14522c3c1","Type":"ContainerStarted","Data":"f3bd03c7490fc99421fa1d22db1934328780ebfcead30a9e5542ea0fa2efe6b7"} Jan 23 07:10:55 crc kubenswrapper[5102]: I0123 07:10:55.312946 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-index-svxnq" podStartSLOduration=1.8309593720000001 podStartE2EDuration="2.312926737s" podCreationTimestamp="2026-01-23 07:10:53 +0000 UTC" firstStartedPulling="2026-01-23 07:10:53.788674098 +0000 UTC m=+1004.609023073" lastFinishedPulling="2026-01-23 07:10:54.270641453 +0000 UTC m=+1005.090990438" observedRunningTime="2026-01-23 07:10:55.311923755 +0000 UTC m=+1006.132272730" watchObservedRunningTime="2026-01-23 07:10:55.312926737 +0000 UTC m=+1006.133275712" Jan 23 07:10:55 crc kubenswrapper[5102]: I0123 07:10:55.608654 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2769e9f5-b41b-41cc-b13b-949ac783b36c" path="/var/lib/kubelet/pods/2769e9f5-b41b-41cc-b13b-949ac783b36c/volumes" Jan 23 07:11:03 crc kubenswrapper[5102]: I0123 07:11:03.382958 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:11:03 crc kubenswrapper[5102]: I0123 07:11:03.383936 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:11:03 crc kubenswrapper[5102]: I0123 07:11:03.420265 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:11:03 crc kubenswrapper[5102]: I0123 07:11:03.458506 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-index-svxnq" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.521978 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g"] Jan 23 07:11:05 crc kubenswrapper[5102]: E0123 07:11:05.522987 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2769e9f5-b41b-41cc-b13b-949ac783b36c" containerName="registry-server" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.523021 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2769e9f5-b41b-41cc-b13b-949ac783b36c" containerName="registry-server" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.523258 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2769e9f5-b41b-41cc-b13b-949ac783b36c" containerName="registry-server" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.524835 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.531991 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"default-dockercfg-x6s9n" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.542038 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g"] Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.566277 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.566378 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.566408 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sl6nd\" (UniqueName: \"kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.667730 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.667804 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sl6nd\" (UniqueName: \"kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.667877 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.668386 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: 
\"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.668668 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.690508 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sl6nd\" (UniqueName: \"kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd\") pod \"97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:05 crc kubenswrapper[5102]: I0123 07:11:05.843716 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:06 crc kubenswrapper[5102]: I0123 07:11:06.319948 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g"] Jan 23 07:11:06 crc kubenswrapper[5102]: W0123 07:11:06.344059 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd61d5841_724a_4fdb_8051_4625df4a1f1d.slice/crio-90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0 WatchSource:0}: Error finding container 90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0: Status 404 returned error can't find the container with id 90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0 Jan 23 07:11:06 crc kubenswrapper[5102]: I0123 07:11:06.445962 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" event={"ID":"d61d5841-724a-4fdb-8051-4625df4a1f1d","Type":"ContainerStarted","Data":"90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0"} Jan 23 07:11:07 crc kubenswrapper[5102]: I0123 07:11:07.457388 5102 generic.go:334] "Generic (PLEG): container finished" podID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerID="6548995a8005b73d39ccde756a2576e6f7431d05013e2df36133e184d1d9c2f6" exitCode=0 Jan 23 07:11:07 crc kubenswrapper[5102]: I0123 07:11:07.457472 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" event={"ID":"d61d5841-724a-4fdb-8051-4625df4a1f1d","Type":"ContainerDied","Data":"6548995a8005b73d39ccde756a2576e6f7431d05013e2df36133e184d1d9c2f6"} Jan 23 07:11:08 crc kubenswrapper[5102]: I0123 07:11:08.468825 5102 generic.go:334] "Generic (PLEG): container finished" podID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerID="21a5018509270b5ab104bbd1bc2781909dce635a435302aaba469ec0c5e93cbe" exitCode=0 Jan 23 07:11:08 crc kubenswrapper[5102]: I0123 07:11:08.468914 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" event={"ID":"d61d5841-724a-4fdb-8051-4625df4a1f1d","Type":"ContainerDied","Data":"21a5018509270b5ab104bbd1bc2781909dce635a435302aaba469ec0c5e93cbe"} Jan 23 07:11:09 crc kubenswrapper[5102]: I0123 07:11:09.479956 5102 generic.go:334] "Generic (PLEG): container finished" podID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerID="b9eaee354d70c7f06c1e58dd1fe0f7efc43e2a247ee092912cee9036c50b7d50" exitCode=0 Jan 23 07:11:09 crc kubenswrapper[5102]: I0123 07:11:09.480041 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" event={"ID":"d61d5841-724a-4fdb-8051-4625df4a1f1d","Type":"ContainerDied","Data":"b9eaee354d70c7f06c1e58dd1fe0f7efc43e2a247ee092912cee9036c50b7d50"} Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.798590 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.884465 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util\") pod \"d61d5841-724a-4fdb-8051-4625df4a1f1d\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.884901 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sl6nd\" (UniqueName: \"kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd\") pod \"d61d5841-724a-4fdb-8051-4625df4a1f1d\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.885142 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle\") pod \"d61d5841-724a-4fdb-8051-4625df4a1f1d\" (UID: \"d61d5841-724a-4fdb-8051-4625df4a1f1d\") " Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.886173 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle" (OuterVolumeSpecName: "bundle") pod "d61d5841-724a-4fdb-8051-4625df4a1f1d" (UID: "d61d5841-724a-4fdb-8051-4625df4a1f1d"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.893197 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd" (OuterVolumeSpecName: "kube-api-access-sl6nd") pod "d61d5841-724a-4fdb-8051-4625df4a1f1d" (UID: "d61d5841-724a-4fdb-8051-4625df4a1f1d"). InnerVolumeSpecName "kube-api-access-sl6nd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.898398 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util" (OuterVolumeSpecName: "util") pod "d61d5841-724a-4fdb-8051-4625df4a1f1d" (UID: "d61d5841-724a-4fdb-8051-4625df4a1f1d"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.986737 5102 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-util\") on node \"crc\" DevicePath \"\"" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.986780 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sl6nd\" (UniqueName: \"kubernetes.io/projected/d61d5841-724a-4fdb-8051-4625df4a1f1d-kube-api-access-sl6nd\") on node \"crc\" DevicePath \"\"" Jan 23 07:11:10 crc kubenswrapper[5102]: I0123 07:11:10.986793 5102 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/d61d5841-724a-4fdb-8051-4625df4a1f1d-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:11:11 crc kubenswrapper[5102]: I0123 07:11:11.500683 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" event={"ID":"d61d5841-724a-4fdb-8051-4625df4a1f1d","Type":"ContainerDied","Data":"90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0"} Jan 23 07:11:11 crc kubenswrapper[5102]: I0123 07:11:11.500756 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="90c57eaa6cf3b132bdf4681354133d9cb55693c8cf9120c51e4d57eca602a0c0" Jan 23 07:11:11 crc kubenswrapper[5102]: I0123 07:11:11.501075 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack-operators/97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g" Jan 23 07:11:16 crc kubenswrapper[5102]: I0123 07:11:16.768475 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:11:16 crc kubenswrapper[5102]: I0123 07:11:16.769161 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.554590 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b"] Jan 23 07:11:18 crc kubenswrapper[5102]: E0123 07:11:18.554861 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="util" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.554875 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="util" Jan 23 07:11:18 crc kubenswrapper[5102]: E0123 07:11:18.554883 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="extract" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.554890 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="extract" Jan 23 07:11:18 crc kubenswrapper[5102]: E0123 07:11:18.554912 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="pull" Jan 23 07:11:18 
crc kubenswrapper[5102]: I0123 07:11:18.554918 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="pull" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.555031 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d61d5841-724a-4fdb-8051-4625df4a1f1d" containerName="extract" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.555436 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.569324 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-init-dockercfg-t9vnj" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.597447 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b"] Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.732722 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-849f5\" (UniqueName: \"kubernetes.io/projected/1b68a251-dbfe-4765-9b64-2bfa66e02d96-kube-api-access-849f5\") pod \"openstack-operator-controller-init-85bfd44c94-wv42b\" (UID: \"1b68a251-dbfe-4765-9b64-2bfa66e02d96\") " pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.835060 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-849f5\" (UniqueName: \"kubernetes.io/projected/1b68a251-dbfe-4765-9b64-2bfa66e02d96-kube-api-access-849f5\") pod \"openstack-operator-controller-init-85bfd44c94-wv42b\" (UID: \"1b68a251-dbfe-4765-9b64-2bfa66e02d96\") " pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.864958 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-849f5\" (UniqueName: \"kubernetes.io/projected/1b68a251-dbfe-4765-9b64-2bfa66e02d96-kube-api-access-849f5\") pod \"openstack-operator-controller-init-85bfd44c94-wv42b\" (UID: \"1b68a251-dbfe-4765-9b64-2bfa66e02d96\") " pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:18 crc kubenswrapper[5102]: I0123 07:11:18.887233 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:19 crc kubenswrapper[5102]: I0123 07:11:19.398914 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b"] Jan 23 07:11:19 crc kubenswrapper[5102]: I0123 07:11:19.591188 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" event={"ID":"1b68a251-dbfe-4765-9b64-2bfa66e02d96","Type":"ContainerStarted","Data":"f03f2459e0d04c7e1e958ec361ff83db428eb0cd9870931fbb779610fe3bcd1f"} Jan 23 07:11:25 crc kubenswrapper[5102]: I0123 07:11:25.664675 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" event={"ID":"1b68a251-dbfe-4765-9b64-2bfa66e02d96","Type":"ContainerStarted","Data":"c68aa2e7adc21593283428ac8f9b734784afcb2163dd17cffe4a029ff81b5f6f"} Jan 23 07:11:25 crc kubenswrapper[5102]: I0123 07:11:25.665375 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:25 crc kubenswrapper[5102]: I0123 07:11:25.703351 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" podStartSLOduration=2.393329335 podStartE2EDuration="7.703322677s" podCreationTimestamp="2026-01-23 07:11:18 +0000 UTC" firstStartedPulling="2026-01-23 07:11:19.416561584 +0000 UTC m=+1030.236910559" lastFinishedPulling="2026-01-23 07:11:24.726554926 +0000 UTC m=+1035.546903901" observedRunningTime="2026-01-23 07:11:25.702264405 +0000 UTC m=+1036.522613380" watchObservedRunningTime="2026-01-23 07:11:25.703322677 +0000 UTC m=+1036.523671652" Jan 23 07:11:38 crc kubenswrapper[5102]: I0123 07:11:38.890617 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-init-85bfd44c94-wv42b" Jan 23 07:11:46 crc kubenswrapper[5102]: I0123 07:11:46.776645 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:11:46 crc kubenswrapper[5102]: I0123 07:11:46.777729 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:11:46 crc kubenswrapper[5102]: I0123 07:11:46.777821 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:11:46 crc kubenswrapper[5102]: I0123 07:11:46.778875 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:11:46 crc kubenswrapper[5102]: I0123 07:11:46.778943 5102 
kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31" gracePeriod=600 Jan 23 07:11:47 crc kubenswrapper[5102]: I0123 07:11:47.847004 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31" exitCode=0 Jan 23 07:11:47 crc kubenswrapper[5102]: I0123 07:11:47.847091 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31"} Jan 23 07:11:47 crc kubenswrapper[5102]: I0123 07:11:47.847605 5102 scope.go:117] "RemoveContainer" containerID="a3ea4a6a5f40da62670a32045877b314d68399c0e473852af43fba0c48ca60c3" Jan 23 07:11:48 crc kubenswrapper[5102]: I0123 07:11:48.858040 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be"} Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.141101 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.142525 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.146912 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"barbican-operator-controller-manager-dockercfg-hzpbl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.159440 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.160743 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.163838 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"cinder-operator-controller-manager-dockercfg-j9dpg" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.175039 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.188048 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.189179 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.193758 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"designate-operator-controller-manager-dockercfg-x8l72" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.205204 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.206476 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.215979 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"glance-operator-controller-manager-dockercfg-pwsdq" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.219334 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.240148 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.249424 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.250652 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.255557 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"heat-operator-controller-manager-dockercfg-gfcjf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.259235 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.269155 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.270231 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.270228 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wj2zs\" (UniqueName: \"kubernetes.io/projected/eab33781-ceb7-4c8b-8df9-55ca5ab33f17-kube-api-access-wj2zs\") pod \"designate-operator-controller-manager-b45d7bf98-mth4h\" (UID: \"eab33781-ceb7-4c8b-8df9-55ca5ab33f17\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.270488 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dkfp8\" (UniqueName: \"kubernetes.io/projected/73336c80-3616-4716-9ecf-cfe3f2114c4a-kube-api-access-dkfp8\") pod \"cinder-operator-controller-manager-69cf5d4557-wb6dk\" (UID: \"73336c80-3616-4716-9ecf-cfe3f2114c4a\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.270565 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xrfkp\" (UniqueName: \"kubernetes.io/projected/1ae0c44b-b391-4df4-8246-b0e24f649e8b-kube-api-access-xrfkp\") pod \"barbican-operator-controller-manager-59dd8b7cbf-272zl\" (UID: \"1ae0c44b-b391-4df4-8246-b0e24f649e8b\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.272704 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"horizon-operator-controller-manager-dockercfg-2bbmj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.295360 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.304324 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.305592 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.309140 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-webhook-server-cert" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.310866 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-8q4bf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.327245 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.339314 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.354192 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.356758 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.360003 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ironic-operator-controller-manager-dockercfg-rkn7k" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.376855 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wj2zs\" (UniqueName: \"kubernetes.io/projected/eab33781-ceb7-4c8b-8df9-55ca5ab33f17-kube-api-access-wj2zs\") pod \"designate-operator-controller-manager-b45d7bf98-mth4h\" (UID: \"eab33781-ceb7-4c8b-8df9-55ca5ab33f17\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.376924 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-scrlq\" (UniqueName: \"kubernetes.io/projected/52d07fd1-692c-461a-baf4-51d4af679796-kube-api-access-scrlq\") pod \"glance-operator-controller-manager-78fdd796fd-n79vx\" (UID: \"52d07fd1-692c-461a-baf4-51d4af679796\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.376954 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l6r7p\" (UniqueName: \"kubernetes.io/projected/d31fc107-8403-4c08-9058-483dafc58c60-kube-api-access-l6r7p\") pod \"horizon-operator-controller-manager-77d5c5b54f-2cmr7\" (UID: \"d31fc107-8403-4c08-9058-483dafc58c60\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.377000 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dkfp8\" (UniqueName: \"kubernetes.io/projected/73336c80-3616-4716-9ecf-cfe3f2114c4a-kube-api-access-dkfp8\") pod \"cinder-operator-controller-manager-69cf5d4557-wb6dk\" (UID: \"73336c80-3616-4716-9ecf-cfe3f2114c4a\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.377032 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-48pqq\" (UniqueName: \"kubernetes.io/projected/9c38e15e-2ddd-473c-892a-59aa9978e12c-kube-api-access-48pqq\") pod \"heat-operator-controller-manager-594c8c9d5d-mjxxp\" (UID: \"9c38e15e-2ddd-473c-892a-59aa9978e12c\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.377065 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xrfkp\" (UniqueName: \"kubernetes.io/projected/1ae0c44b-b391-4df4-8246-b0e24f649e8b-kube-api-access-xrfkp\") pod \"barbican-operator-controller-manager-59dd8b7cbf-272zl\" (UID: \"1ae0c44b-b391-4df4-8246-b0e24f649e8b\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.386892 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.423970 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9"] Jan 23 07:11:58 crc 
kubenswrapper[5102]: I0123 07:11:58.427774 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wj2zs\" (UniqueName: \"kubernetes.io/projected/eab33781-ceb7-4c8b-8df9-55ca5ab33f17-kube-api-access-wj2zs\") pod \"designate-operator-controller-manager-b45d7bf98-mth4h\" (UID: \"eab33781-ceb7-4c8b-8df9-55ca5ab33f17\") " pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.430873 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xrfkp\" (UniqueName: \"kubernetes.io/projected/1ae0c44b-b391-4df4-8246-b0e24f649e8b-kube-api-access-xrfkp\") pod \"barbican-operator-controller-manager-59dd8b7cbf-272zl\" (UID: \"1ae0c44b-b391-4df4-8246-b0e24f649e8b\") " pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.431312 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.435692 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"keystone-operator-controller-manager-dockercfg-btbnt" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.463362 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.464473 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dkfp8\" (UniqueName: \"kubernetes.io/projected/73336c80-3616-4716-9ecf-cfe3f2114c4a-kube-api-access-dkfp8\") pod \"cinder-operator-controller-manager-69cf5d4557-wb6dk\" (UID: \"73336c80-3616-4716-9ecf-cfe3f2114c4a\") " pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.465007 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.470918 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478462 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xqgmm\" (UniqueName: \"kubernetes.io/projected/51b93e08-8c79-4ccc-b4d4-c5d54e095284-kube-api-access-xqgmm\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wcdgn\" (UID: \"51b93e08-8c79-4ccc-b4d4-c5d54e095284\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478576 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-scrlq\" (UniqueName: \"kubernetes.io/projected/52d07fd1-692c-461a-baf4-51d4af679796-kube-api-access-scrlq\") pod \"glance-operator-controller-manager-78fdd796fd-n79vx\" (UID: \"52d07fd1-692c-461a-baf4-51d4af679796\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478638 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l6r7p\" (UniqueName: \"kubernetes.io/projected/d31fc107-8403-4c08-9058-483dafc58c60-kube-api-access-l6r7p\") pod \"horizon-operator-controller-manager-77d5c5b54f-2cmr7\" (UID: \"d31fc107-8403-4c08-9058-483dafc58c60\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478671 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-48pqq\" (UniqueName: \"kubernetes.io/projected/9c38e15e-2ddd-473c-892a-59aa9978e12c-kube-api-access-48pqq\") pod \"heat-operator-controller-manager-594c8c9d5d-mjxxp\" (UID: \"9c38e15e-2ddd-473c-892a-59aa9978e12c\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.478701 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhxxp\" (UniqueName: \"kubernetes.io/projected/8661bd2c-86be-46fd-95d7-df60f1736855-kube-api-access-dhxxp\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.479436 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.494400 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.513477 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.523185 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"manila-operator-controller-manager-dockercfg-chxm6" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.524244 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-scrlq\" (UniqueName: \"kubernetes.io/projected/52d07fd1-692c-461a-baf4-51d4af679796-kube-api-access-scrlq\") pod \"glance-operator-controller-manager-78fdd796fd-n79vx\" (UID: \"52d07fd1-692c-461a-baf4-51d4af679796\") " pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.541433 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.552201 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l6r7p\" (UniqueName: \"kubernetes.io/projected/d31fc107-8403-4c08-9058-483dafc58c60-kube-api-access-l6r7p\") pod \"horizon-operator-controller-manager-77d5c5b54f-2cmr7\" (UID: \"d31fc107-8403-4c08-9058-483dafc58c60\") " pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.555693 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-48pqq\" (UniqueName: \"kubernetes.io/projected/9c38e15e-2ddd-473c-892a-59aa9978e12c-kube-api-access-48pqq\") pod \"heat-operator-controller-manager-594c8c9d5d-mjxxp\" (UID: \"9c38e15e-2ddd-473c-892a-59aa9978e12c\") " pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.556121 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.578960 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.586934 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.605130 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.628269 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.631949 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnn4z\" (UniqueName: \"kubernetes.io/projected/9a8349c3-c1d1-4549-9a7f-67755c04328f-kube-api-access-xnn4z\") pod \"keystone-operator-controller-manager-b8b6d4659-52bz9\" (UID: \"9a8349c3-c1d1-4549-9a7f-67755c04328f\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.632027 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhxxp\" (UniqueName: \"kubernetes.io/projected/8661bd2c-86be-46fd-95d7-df60f1736855-kube-api-access-dhxxp\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.632088 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-649lp\" (UniqueName: \"kubernetes.io/projected/80047622-db1e-4345-b2aa-e44f716fe6ad-kube-api-access-649lp\") pod \"manila-operator-controller-manager-78c6999f6f-rs5gj\" (UID: \"80047622-db1e-4345-b2aa-e44f716fe6ad\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.632120 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xqgmm\" (UniqueName: \"kubernetes.io/projected/51b93e08-8c79-4ccc-b4d4-c5d54e095284-kube-api-access-xqgmm\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wcdgn\" (UID: \"51b93e08-8c79-4ccc-b4d4-c5d54e095284\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.632200 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: E0123 07:11:58.632362 5102 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 07:11:58 crc kubenswrapper[5102]: E0123 07:11:58.632440 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert podName:8661bd2c-86be-46fd-95d7-df60f1736855 nodeName:}" failed. No retries permitted until 2026-01-23 07:11:59.132404325 +0000 UTC m=+1069.952753300 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert") pod "infra-operator-controller-manager-54ccf4f85d-lwblf" (UID: "8661bd2c-86be-46fd-95d7-df60f1736855") : secret "infra-operator-webhook-server-cert" not found Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.633956 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"mariadb-operator-controller-manager-dockercfg-pkjfz" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.650274 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.664355 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xqgmm\" (UniqueName: \"kubernetes.io/projected/51b93e08-8c79-4ccc-b4d4-c5d54e095284-kube-api-access-xqgmm\") pod \"ironic-operator-controller-manager-69d6c9f5b8-wcdgn\" (UID: \"51b93e08-8c79-4ccc-b4d4-c5d54e095284\") " pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.664596 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhxxp\" (UniqueName: \"kubernetes.io/projected/8661bd2c-86be-46fd-95d7-df60f1736855-kube-api-access-dhxxp\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.675151 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.676220 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.680297 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"neutron-operator-controller-manager-dockercfg-jqw4w" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.687690 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.688580 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.688983 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.690999 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"nova-operator-controller-manager-dockercfg-79rnr" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.693859 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.739214 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnn4z\" (UniqueName: \"kubernetes.io/projected/9a8349c3-c1d1-4549-9a7f-67755c04328f-kube-api-access-xnn4z\") pod \"keystone-operator-controller-manager-b8b6d4659-52bz9\" (UID: \"9a8349c3-c1d1-4549-9a7f-67755c04328f\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.739348 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-649lp\" (UniqueName: \"kubernetes.io/projected/80047622-db1e-4345-b2aa-e44f716fe6ad-kube-api-access-649lp\") pod \"manila-operator-controller-manager-78c6999f6f-rs5gj\" (UID: \"80047622-db1e-4345-b2aa-e44f716fe6ad\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.739479 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b6q97\" (UniqueName: \"kubernetes.io/projected/529e16f1-1e4b-4ba2-8855-e8a445d0c63f-kube-api-access-b6q97\") pod \"mariadb-operator-controller-manager-c87fff755-j6bsb\" (UID: \"529e16f1-1e4b-4ba2-8855-e8a445d0c63f\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.745119 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.749419 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.753708 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"octavia-operator-controller-manager-dockercfg-ljkrd" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.772974 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.786770 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.800213 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-649lp\" (UniqueName: \"kubernetes.io/projected/80047622-db1e-4345-b2aa-e44f716fe6ad-kube-api-access-649lp\") pod \"manila-operator-controller-manager-78c6999f6f-rs5gj\" (UID: \"80047622-db1e-4345-b2aa-e44f716fe6ad\") " pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.801914 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnn4z\" (UniqueName: \"kubernetes.io/projected/9a8349c3-c1d1-4549-9a7f-67755c04328f-kube-api-access-xnn4z\") pod \"keystone-operator-controller-manager-b8b6d4659-52bz9\" (UID: \"9a8349c3-c1d1-4549-9a7f-67755c04328f\") " pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.829747 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.831185 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.839101 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-webhook-server-cert" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.839307 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rs8sr" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.845930 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j2rs4\" (UniqueName: \"kubernetes.io/projected/4027b29c-39eb-4b48-b17d-64c6587dc3fb-kube-api-access-j2rs4\") pod \"octavia-operator-controller-manager-7bd9774b6-nwpwk\" (UID: \"4027b29c-39eb-4b48-b17d-64c6587dc3fb\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.846949 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhbl4\" (UniqueName: \"kubernetes.io/projected/40fc8f9a-303c-4264-ac77-448100591967-kube-api-access-dhbl4\") pod \"neutron-operator-controller-manager-5d8f59fb49-b78cx\" (UID: \"40fc8f9a-303c-4264-ac77-448100591967\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.847398 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b6q97\" (UniqueName: \"kubernetes.io/projected/529e16f1-1e4b-4ba2-8855-e8a445d0c63f-kube-api-access-b6q97\") pod \"mariadb-operator-controller-manager-c87fff755-j6bsb\" (UID: \"529e16f1-1e4b-4ba2-8855-e8a445d0c63f\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.847494 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4jzs8\" (UniqueName: \"kubernetes.io/projected/750c537e-8dea-47b4-883c-c30a44e5f48c-kube-api-access-4jzs8\") pod \"nova-operator-controller-manager-6b8bc8d87d-p4czx\" (UID: \"750c537e-8dea-47b4-883c-c30a44e5f48c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.848923 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.850230 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.856836 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"ovn-operator-controller-manager-dockercfg-b2bbv" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.895845 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b6q97\" (UniqueName: \"kubernetes.io/projected/529e16f1-1e4b-4ba2-8855-e8a445d0c63f-kube-api-access-b6q97\") pod \"mariadb-operator-controller-manager-c87fff755-j6bsb\" (UID: \"529e16f1-1e4b-4ba2-8855-e8a445d0c63f\") " pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.901351 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.934241 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.951836 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4jzs8\" (UniqueName: \"kubernetes.io/projected/750c537e-8dea-47b4-883c-c30a44e5f48c-kube-api-access-4jzs8\") pod \"nova-operator-controller-manager-6b8bc8d87d-p4czx\" (UID: \"750c537e-8dea-47b4-883c-c30a44e5f48c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.954184 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skhqh\" (UniqueName: \"kubernetes.io/projected/8d146faa-8342-4adc-8e6d-37018df6873f-kube-api-access-skhqh\") pod \"ovn-operator-controller-manager-55db956ddc-bmq5z\" (UID: \"8d146faa-8342-4adc-8e6d-37018df6873f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.954279 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j2rs4\" (UniqueName: \"kubernetes.io/projected/4027b29c-39eb-4b48-b17d-64c6587dc3fb-kube-api-access-j2rs4\") pod \"octavia-operator-controller-manager-7bd9774b6-nwpwk\" (UID: \"4027b29c-39eb-4b48-b17d-64c6587dc3fb\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.954314 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vqj7v\" (UniqueName: \"kubernetes.io/projected/a22f5f2d-ed47-4190-84a2-5243a6479598-kube-api-access-vqj7v\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.954477 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhbl4\" (UniqueName: \"kubernetes.io/projected/40fc8f9a-303c-4264-ac77-448100591967-kube-api-access-dhbl4\") pod \"neutron-operator-controller-manager-5d8f59fb49-b78cx\" (UID: \"40fc8f9a-303c-4264-ac77-448100591967\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:11:58 crc kubenswrapper[5102]: 
I0123 07:11:58.954520 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.954041 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.963378 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc"] Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.974044 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.990470 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j2rs4\" (UniqueName: \"kubernetes.io/projected/4027b29c-39eb-4b48-b17d-64c6587dc3fb-kube-api-access-j2rs4\") pod \"octavia-operator-controller-manager-7bd9774b6-nwpwk\" (UID: \"4027b29c-39eb-4b48-b17d-64c6587dc3fb\") " pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:11:58 crc kubenswrapper[5102]: I0123 07:11:58.994669 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4jzs8\" (UniqueName: \"kubernetes.io/projected/750c537e-8dea-47b4-883c-c30a44e5f48c-kube-api-access-4jzs8\") pod \"nova-operator-controller-manager-6b8bc8d87d-p4czx\" (UID: \"750c537e-8dea-47b4-883c-c30a44e5f48c\") " pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.006956 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhbl4\" (UniqueName: \"kubernetes.io/projected/40fc8f9a-303c-4264-ac77-448100591967-kube-api-access-dhbl4\") pod \"neutron-operator-controller-manager-5d8f59fb49-b78cx\" (UID: \"40fc8f9a-303c-4264-ac77-448100591967\") " pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.009788 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.034024 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.038845 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.041936 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.064709 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skhqh\" (UniqueName: \"kubernetes.io/projected/8d146faa-8342-4adc-8e6d-37018df6873f-kube-api-access-skhqh\") pod \"ovn-operator-controller-manager-55db956ddc-bmq5z\" (UID: \"8d146faa-8342-4adc-8e6d-37018df6873f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.064782 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vqj7v\" (UniqueName: \"kubernetes.io/projected/a22f5f2d-ed47-4190-84a2-5243a6479598-kube-api-access-vqj7v\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.064853 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.065017 5102 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.065069 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert podName:a22f5f2d-ed47-4190-84a2-5243a6479598 nodeName:}" failed. No retries permitted until 2026-01-23 07:11:59.565051366 +0000 UTC m=+1070.385400341 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert") pod "openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" (UID: "a22f5f2d-ed47-4190-84a2-5243a6479598") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.074752 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"placement-operator-controller-manager-dockercfg-xj6k8" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.075011 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.076231 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.085904 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.088674 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"swift-operator-controller-manager-dockercfg-j7jgd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.120875 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vqj7v\" (UniqueName: \"kubernetes.io/projected/a22f5f2d-ed47-4190-84a2-5243a6479598-kube-api-access-vqj7v\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.120981 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.122458 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skhqh\" (UniqueName: \"kubernetes.io/projected/8d146faa-8342-4adc-8e6d-37018df6873f-kube-api-access-skhqh\") pod \"ovn-operator-controller-manager-55db956ddc-bmq5z\" (UID: \"8d146faa-8342-4adc-8e6d-37018df6873f\") " pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.166092 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.166184 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hhc9j\" (UniqueName: \"kubernetes.io/projected/ff029305-9cf1-451a-b5b8-ff55bfc14dd3-kube-api-access-hhc9j\") pod \"placement-operator-controller-manager-5d646b7d76-j6qxz\" (UID: \"ff029305-9cf1-451a-b5b8-ff55bfc14dd3\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.166244 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4k24\" (UniqueName: \"kubernetes.io/projected/9f0624d9-4655-424e-bcc9-2e445bb833c7-kube-api-access-f4k24\") pod \"swift-operator-controller-manager-547cbdb99f-6js2g\" (UID: \"9f0624d9-4655-424e-bcc9-2e445bb833c7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.166403 5102 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.166457 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert podName:8661bd2c-86be-46fd-95d7-df60f1736855 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:00.166436468 +0000 UTC m=+1070.986785443 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert") pod "infra-operator-controller-manager-54ccf4f85d-lwblf" (UID: "8661bd2c-86be-46fd-95d7-df60f1736855") : secret "infra-operator-webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.174643 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.176003 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.189365 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"telemetry-operator-controller-manager-dockercfg-vvsc7" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.198344 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.231103 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.252043 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.269673 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hhc9j\" (UniqueName: \"kubernetes.io/projected/ff029305-9cf1-451a-b5b8-ff55bfc14dd3-kube-api-access-hhc9j\") pod \"placement-operator-controller-manager-5d646b7d76-j6qxz\" (UID: \"ff029305-9cf1-451a-b5b8-ff55bfc14dd3\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.269730 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wgt7q\" (UniqueName: \"kubernetes.io/projected/d9c398eb-293d-47e1-9f0d-2ce33fd8878f-kube-api-access-wgt7q\") pod \"telemetry-operator-controller-manager-85cd9769bb-7nxxd\" (UID: \"d9c398eb-293d-47e1-9f0d-2ce33fd8878f\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.269794 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4k24\" (UniqueName: \"kubernetes.io/projected/9f0624d9-4655-424e-bcc9-2e445bb833c7-kube-api-access-f4k24\") pod \"swift-operator-controller-manager-547cbdb99f-6js2g\" (UID: \"9f0624d9-4655-424e-bcc9-2e445bb833c7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.280682 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.281905 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.299854 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"test-operator-controller-manager-dockercfg-f79pd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.300078 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.338572 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hhc9j\" (UniqueName: \"kubernetes.io/projected/ff029305-9cf1-451a-b5b8-ff55bfc14dd3-kube-api-access-hhc9j\") pod \"placement-operator-controller-manager-5d646b7d76-j6qxz\" (UID: \"ff029305-9cf1-451a-b5b8-ff55bfc14dd3\") " pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.368820 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4k24\" (UniqueName: \"kubernetes.io/projected/9f0624d9-4655-424e-bcc9-2e445bb833c7-kube-api-access-f4k24\") pod \"swift-operator-controller-manager-547cbdb99f-6js2g\" (UID: \"9f0624d9-4655-424e-bcc9-2e445bb833c7\") " pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.371903 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wpqpz\" (UniqueName: \"kubernetes.io/projected/fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8-kube-api-access-wpqpz\") pod \"test-operator-controller-manager-69797bbcbd-jhfvr\" (UID: \"fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.372025 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wgt7q\" (UniqueName: \"kubernetes.io/projected/d9c398eb-293d-47e1-9f0d-2ce33fd8878f-kube-api-access-wgt7q\") pod \"telemetry-operator-controller-manager-85cd9769bb-7nxxd\" (UID: \"d9c398eb-293d-47e1-9f0d-2ce33fd8878f\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.381110 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.382024 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.392271 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"watcher-operator-controller-manager-dockercfg-bnrpk" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.396292 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wgt7q\" (UniqueName: \"kubernetes.io/projected/d9c398eb-293d-47e1-9f0d-2ce33fd8878f-kube-api-access-wgt7q\") pod \"telemetry-operator-controller-manager-85cd9769bb-7nxxd\" (UID: \"d9c398eb-293d-47e1-9f0d-2ce33fd8878f\") " pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.401050 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.424821 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.467812 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.478478 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kkz5l\" (UniqueName: \"kubernetes.io/projected/f5a445df-fe4d-4323-993e-7d9f20cdd29c-kube-api-access-kkz5l\") pod \"watcher-operator-controller-manager-5ffb9c6597-hxkkl\" (UID: \"f5a445df-fe4d-4323-993e-7d9f20cdd29c\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.478589 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wpqpz\" (UniqueName: \"kubernetes.io/projected/fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8-kube-api-access-wpqpz\") pod \"test-operator-controller-manager-69797bbcbd-jhfvr\" (UID: \"fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.530576 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.530591 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5"] Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.534979 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.546160 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"metrics-server-cert" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.546260 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-g2zcp" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.546167 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"webhook-server-cert" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.589762 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.589832 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdvx7\" (UniqueName: \"kubernetes.io/projected/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-kube-api-access-kdvx7\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.589862 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kkz5l\" (UniqueName: \"kubernetes.io/projected/f5a445df-fe4d-4323-993e-7d9f20cdd29c-kube-api-access-kkz5l\") pod \"watcher-operator-controller-manager-5ffb9c6597-hxkkl\" (UID: \"f5a445df-fe4d-4323-993e-7d9f20cdd29c\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.589921 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.589978 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.590220 5102 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.590280 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert podName:a22f5f2d-ed47-4190-84a2-5243a6479598 nodeName:}" failed. 
No retries permitted until 2026-01-23 07:12:00.59025864 +0000 UTC m=+1071.410607605 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert") pod "openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" (UID: "a22f5f2d-ed47-4190-84a2-5243a6479598") : secret "openstack-baremetal-operator-webhook-server-cert" not found
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.590936 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5"]
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.591792 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wpqpz\" (UniqueName: \"kubernetes.io/projected/fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8-kube-api-access-wpqpz\") pod \"test-operator-controller-manager-69797bbcbd-jhfvr\" (UID: \"fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8\") " pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr"
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.608774 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd"]
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.631009 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl"]
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.631062 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd"]
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.631840 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd"
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.635163 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"rabbitmq-cluster-operator-controller-manager-dockercfg-frt8k"
Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.652265 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kkz5l\" (UniqueName: \"kubernetes.io/projected/f5a445df-fe4d-4323-993e-7d9f20cdd29c-kube-api-access-kkz5l\") pod \"watcher-operator-controller-manager-5ffb9c6597-hxkkl\" (UID: \"f5a445df-fe4d-4323-993e-7d9f20cdd29c\") " pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl"
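Every E-level record above follows the same pattern: a MountVolume.SetUp for a secret volume fails because the referenced Secret (here openstack-baremetal-operator-webhook-server-cert) has not been created yet, and the kubelet schedules a retry. Below is a minimal client-go sketch of the same existence check, with the namespace and secret name taken from the log; the kubeconfig wiring is an assumption for illustration, not something the log shows.

```go
// secretcheck.go - a hedged sketch, not kubelet code: probe for the Secret a
// failing volume references and distinguish NotFound from transient errors.
package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Assumed wiring: a reachable kubeconfig in the default location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// Namespace and name come straight from the failing volume in the log.
	_, err = cs.CoreV1().Secrets("openstack-operators").Get(context.TODO(),
		"openstack-baremetal-operator-webhook-server-cert", metav1.GetOptions{})
	switch {
	case apierrors.IsNotFound(err):
		fmt.Println("secret not found: kubelet will keep retrying the mount")
	case err != nil:
		fmt.Println("transient API error:", err)
	default:
		fmt.Println("secret exists: the next mount retry should succeed")
	}
}
```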
Need to start a new one" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.693652 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slf2c\" (UniqueName: \"kubernetes.io/projected/937d6dd8-25fe-4346-80f5-345f3f772ed9-kube-api-access-slf2c\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cdzmd\" (UID: \"937d6dd8-25fe-4346-80f5-345f3f772ed9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.694149 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.694362 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.694584 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdvx7\" (UniqueName: \"kubernetes.io/projected/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-kube-api-access-kdvx7\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.695750 5102 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.695933 5102 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.696713 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:00.195819943 +0000 UTC m=+1071.016168918 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: E0123 07:11:59.696763 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:00.196752083 +0000 UTC m=+1071.017101058 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "metrics-server-cert" not found Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.719954 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.732273 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdvx7\" (UniqueName: \"kubernetes.io/projected/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-kube-api-access-kdvx7\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.795998 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slf2c\" (UniqueName: \"kubernetes.io/projected/937d6dd8-25fe-4346-80f5-345f3f772ed9-kube-api-access-slf2c\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cdzmd\" (UID: \"937d6dd8-25fe-4346-80f5-345f3f772ed9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.818444 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slf2c\" (UniqueName: \"kubernetes.io/projected/937d6dd8-25fe-4346-80f5-345f3f772ed9-kube-api-access-slf2c\") pod \"rabbitmq-cluster-operator-manager-668c99d594-cdzmd\" (UID: \"937d6dd8-25fe-4346-80f5-345f3f772ed9\") " pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.836851 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" Jan 23 07:11:59 crc kubenswrapper[5102]: I0123 07:11:59.969341 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" event={"ID":"1ae0c44b-b391-4df4-8246-b0e24f649e8b","Type":"ContainerStarted","Data":"00e24148359e9b902c67bd2f79a5a92ab956b4e990137b15a3cd2361fa7fc7f6"} Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.206265 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.206370 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.206447 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.206661 5102 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.206741 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert podName:8661bd2c-86be-46fd-95d7-df60f1736855 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:02.206712608 +0000 UTC m=+1073.027061583 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert") pod "infra-operator-controller-manager-54ccf4f85d-lwblf" (UID: "8661bd2c-86be-46fd-95d7-df60f1736855") : secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.207285 5102 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.207318 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:01.207308297 +0000 UTC m=+1072.027657272 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.207370 5102 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.207399 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:01.20739014 +0000 UTC m=+1072.027739115 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "metrics-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.572587 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn"] Jan 23 07:12:00 crc kubenswrapper[5102]: W0123 07:12:00.575848 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod51b93e08_8c79_4ccc_b4d4_c5d54e095284.slice/crio-2145a7267c961483b13ceee54d06cd433c6d2bd1230b42b05a687d6f3ede9aa3 WatchSource:0}: Error finding container 2145a7267c961483b13ceee54d06cd433c6d2bd1230b42b05a687d6f3ede9aa3: Status 404 returned error can't find the container with id 2145a7267c961483b13ceee54d06cd433c6d2bd1230b42b05a687d6f3ede9aa3 Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.614815 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.615073 5102 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.615146 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert podName:a22f5f2d-ed47-4190-84a2-5243a6479598 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:02.615123586 +0000 UTC m=+1073.435472561 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert") pod "openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" (UID: "a22f5f2d-ed47-4190-84a2-5243a6479598") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.726728 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.763694 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.767736 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.780785 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.790457 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.796895 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.803740 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7"] Jan 23 07:12:00 crc kubenswrapper[5102]: W0123 07:12:00.810286 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod73336c80_3616_4716_9ecf_cfe3f2114c4a.slice/crio-2f8aa7152017b3d000d8af6505838c9ff1a15d845c56a4ebb3debd31eec00f5c WatchSource:0}: Error finding container 2f8aa7152017b3d000d8af6505838c9ff1a15d845c56a4ebb3debd31eec00f5c: Status 404 returned error can't find the container with id 2f8aa7152017b3d000d8af6505838c9ff1a15d845c56a4ebb3debd31eec00f5c Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.909967 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.915576 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj"] Jan 23 07:12:00 crc kubenswrapper[5102]: W0123 07:12:00.916158 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod750c537e_8dea_47b4_883c_c30a44e5f48c.slice/crio-5e038fe84112527245d91d468f3bd567772fa26ed6009ee99575416dcc52b27e WatchSource:0}: Error finding container 5e038fe84112527245d91d468f3bd567772fa26ed6009ee99575416dcc52b27e: Status 404 returned error can't find the container with id 5e038fe84112527245d91d468f3bd567772fa26ed6009ee99575416dcc52b27e Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.924178 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx"] Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.955602 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx"] Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.978136 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-dhbl4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod neutron-operator-controller-manager-5d8f59fb49-b78cx_openstack-operators(40fc8f9a-303c-4264-ac77-448100591967): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:00 crc kubenswrapper[5102]: E0123 07:12:00.979303 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" podUID="40fc8f9a-303c-4264-ac77-448100591967" Jan 23 07:12:00 crc kubenswrapper[5102]: I0123 07:12:00.997359 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" event={"ID":"9c38e15e-2ddd-473c-892a-59aa9978e12c","Type":"ContainerStarted","Data":"416bf339293e3986a988be053159083924db680c5becd1bc1280bf652ce1f039"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.000929 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz"] Jan 23 07:12:01 crc kubenswrapper[5102]: 
I0123 07:12:01.006335 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" event={"ID":"d31fc107-8403-4c08-9058-483dafc58c60","Type":"ContainerStarted","Data":"7e971730c2e9a874622ef91c3325c4e8307eb3b17ec6b7bdd8e97b809fd7aa0b"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.008225 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" event={"ID":"73336c80-3616-4716-9ecf-cfe3f2114c4a","Type":"ContainerStarted","Data":"2f8aa7152017b3d000d8af6505838c9ff1a15d845c56a4ebb3debd31eec00f5c"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.009394 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" event={"ID":"9a8349c3-c1d1-4549-9a7f-67755c04328f","Type":"ContainerStarted","Data":"ab03e95311e639a4008dd340f567fdfca6217df041375ea1f6918e5016240a7d"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.013051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" event={"ID":"4027b29c-39eb-4b48-b17d-64c6587dc3fb","Type":"ContainerStarted","Data":"b99acf548836eb627f42d20b2ca803c816920317e93f9046db7fb219e7481fa5"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.016239 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" event={"ID":"40fc8f9a-303c-4264-ac77-448100591967","Type":"ContainerStarted","Data":"e7583634c069526b2a92f83aa22f4181d02dce1589c428f9f843f00320367135"} Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.017508 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" podUID="40fc8f9a-303c-4264-ac77-448100591967" Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.026168 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr"] Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.026217 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" event={"ID":"51b93e08-8c79-4ccc-b4d4-c5d54e095284","Type":"ContainerStarted","Data":"2145a7267c961483b13ceee54d06cd433c6d2bd1230b42b05a687d6f3ede9aa3"} Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.026773 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} 
BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wpqpz,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod test-operator-controller-manager-69797bbcbd-jhfvr_openstack-operators(fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.028229 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" podUID="fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8" Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.028733 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" event={"ID":"529e16f1-1e4b-4ba2-8855-e8a445d0c63f","Type":"ContainerStarted","Data":"6b04ba6c325596b303af527592a83feb34fe39ebd9722a2b3217f4d0f21c4628"} Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.029007 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-f4k24,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod swift-operator-controller-manager-547cbdb99f-6js2g_openstack-operators(9f0624d9-4655-424e-bcc9-2e445bb833c7): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.031274 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" podUID="9f0624d9-4655-424e-bcc9-2e445bb833c7" Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.031385 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g"] Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.042413 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd"] Jan 23 07:12:01 crc kubenswrapper[5102]: W0123 07:12:01.045811 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod937d6dd8_25fe_4346_80f5_345f3f772ed9.slice/crio-243931bb44ca72a32e3e7210a36e4da25affc3ad14ad03ced8d984bcd04a5da5 WatchSource:0}: Error finding container 243931bb44ca72a32e3e7210a36e4da25affc3ad14ad03ced8d984bcd04a5da5: Status 404 returned error can't find the container with id 243931bb44ca72a32e3e7210a36e4da25affc3ad14ad03ced8d984bcd04a5da5 Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.046745 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" event={"ID":"750c537e-8dea-47b4-883c-c30a44e5f48c","Type":"ContainerStarted","Data":"5e038fe84112527245d91d468f3bd567772fa26ed6009ee99575416dcc52b27e"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.049942 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl"] Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.054587 5102 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" event={"ID":"80047622-db1e-4345-b2aa-e44f716fe6ad","Type":"ContainerStarted","Data":"80e7edb279a36ef664c072129be035a99772f475bb98810233a8e2d5dcb994ef"} Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.054933 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:operator,Image:quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2,Command:[/manager],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:metrics,HostPort:0,ContainerPort:9782,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:OPERATOR_NAMESPACE,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:metadata.namespace,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{200 -3} {} 200m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-slf2c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000660000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-cluster-operator-manager-668c99d594-cdzmd_openstack-operators(937d6dd8-25fe-4346-80f5-345f3f772ed9): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.056050 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" podUID="937d6dd8-25fe-4346-80f5-345f3f772ed9" Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.058177 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" event={"ID":"52d07fd1-692c-461a-baf4-51d4af679796","Type":"ContainerStarted","Data":"2ddd1a95e36424d3cc566c3271e8152cb32f41eeaf0e0f825aa288b3463f561a"} Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.058308 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z"] Jan 23 07:12:01 crc kubenswrapper[5102]: W0123 07:12:01.060161 5102 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf5a445df_fe4d_4323_993e_7d9f20cdd29c.slice/crio-b7128cddaad0e7569ad3ebc275b5e7fb5f9ea4d1a9c74f5ada3bbf7d51f29f5c WatchSource:0}: Error finding container b7128cddaad0e7569ad3ebc275b5e7fb5f9ea4d1a9c74f5ada3bbf7d51f29f5c: Status 404 returned error can't find the container with id b7128cddaad0e7569ad3ebc275b5e7fb5f9ea4d1a9c74f5ada3bbf7d51f29f5c Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.061062 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" event={"ID":"eab33781-ceb7-4c8b-8df9-55ca5ab33f17","Type":"ContainerStarted","Data":"1ef0ce55f51b5b5fe84d494cb621417f66c5d5744ba6f0136e599b32651c405e"} Jan 23 07:12:01 crc kubenswrapper[5102]: W0123 07:12:01.063658 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8d146faa_8342_4adc_8e6d_37018df6873f.slice/crio-7405fd36b2e9b841fd16b53b42925676b8791326bdd9c401facd8f68d14522b6 WatchSource:0}: Error finding container 7405fd36b2e9b841fd16b53b42925676b8791326bdd9c401facd8f68d14522b6: Status 404 returned error can't find the container with id 7405fd36b2e9b841fd16b53b42925676b8791326bdd9c401facd8f68d14522b6 Jan 23 07:12:01 crc kubenswrapper[5102]: I0123 07:12:01.063705 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd"] Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.067524 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-kkz5l,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 
},Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod watcher-operator-controller-manager-5ffb9c6597-hxkkl_openstack-operators(f5a445df-fe4d-4323-993e-7d9f20cdd29c): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.067943 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-skhqh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod ovn-operator-controller-manager-55db956ddc-bmq5z_openstack-operators(8d146faa-8342-4adc-8e6d-37018df6873f): ErrImagePull: pull QPS exceeded" logger="UnhandledError" Jan 23 07:12:01 crc 
kubenswrapper[5102]: E0123 07:12:01.068910 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" podUID="f5a445df-fe4d-4323-993e-7d9f20cdd29c" Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.069340 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" podUID="8d146faa-8342-4adc-8e6d-37018df6873f" Jan 23 07:12:01 crc kubenswrapper[5102]: W0123 07:12:01.069830 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd9c398eb_293d_47e1_9f0d_2ce33fd8878f.slice/crio-c4db747fd3cf13ba3f9198cb1d25daca5cd7a765624b48d81ea21292b76f8022 WatchSource:0}: Error finding container c4db747fd3cf13ba3f9198cb1d25daca5cd7a765624b48d81ea21292b76f8022: Status 404 returned error can't find the container with id c4db747fd3cf13ba3f9198cb1d25daca5cd7a765624b48d81ea21292b76f8022 Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.075056 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-wgt7q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start 
failed in pod telemetry-operator-controller-manager-85cd9769bb-7nxxd_openstack-operators(d9c398eb-293d-47e1-9f0d-2ce33fd8878f): ErrImagePull: pull QPS exceeded" logger="UnhandledError"
Jan 23 07:12:01 crc kubenswrapper[5102]: E0123 07:12:01.076241 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"pull QPS exceeded\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" podUID="d9c398eb-293d-47e1-9f0d-2ce33fd8878f"
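"pull QPS exceeded" is not a registry-side error: the kubelet throttles image pulls through a client-side token bucket (registryPullQPS/registryBurst, which default to 5 QPS with a burst of 10), and with this many operator images requested in the same second the bucket runs dry, so the pulls fail immediately and the pods drop into ImagePullBackOff. A sketch of that throttle using the flowcontrol token bucket follows; it is an illustration of the mechanism, not the kubelet's image manager.

```go
// pullqps.go - illustrative token-bucket throttle behind "pull QPS exceeded".
// Defaults assumed from KubeletConfiguration: registryPullQPS=5, registryBurst=10.
package main

import (
	"fmt"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	limiter := flowcontrol.NewTokenBucketRateLimiter(5, 10) // qps=5, burst=10
	for pull := 1; pull <= 15; pull++ {
		if limiter.TryAccept() {
			fmt.Printf("pull %2d: allowed\n", pull)
		} else {
			// Kubelet surfaces this as ErrImagePull: "pull QPS exceeded";
			// the pod worker then retries under ImagePullBackOff.
			fmt.Printf("pull %2d: pull QPS exceeded\n", pull)
		}
	}
}
```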
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.076291 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" event={"ID":"9f0624d9-4655-424e-bcc9-2e445bb833c7","Type":"ContainerStarted","Data":"54d42c6cd39eaa79399c9e2ba07063dadb2af0301ac090e7d3e2897bdda4b2ba"} Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.080391 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" event={"ID":"fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8","Type":"ContainerStarted","Data":"85315c295854384ee8d3b4807fe96c6288fd091d75f36759ef3be538f842062b"} Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.081865 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" podUID="9f0624d9-4655-424e-bcc9-2e445bb833c7" Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.085522 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" podUID="fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8" Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.087563 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" event={"ID":"f5a445df-fe4d-4323-993e-7d9f20cdd29c","Type":"ContainerStarted","Data":"b7128cddaad0e7569ad3ebc275b5e7fb5f9ea4d1a9c74f5ada3bbf7d51f29f5c"} Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.089557 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" podUID="f5a445df-fe4d-4323-993e-7d9f20cdd29c" Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.090353 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" event={"ID":"8d146faa-8342-4adc-8e6d-37018df6873f","Type":"ContainerStarted","Data":"7405fd36b2e9b841fd16b53b42925676b8791326bdd9c401facd8f68d14522b6"} Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.092356 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" 
podUID="8d146faa-8342-4adc-8e6d-37018df6873f" Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.092916 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" event={"ID":"ff029305-9cf1-451a-b5b8-ff55bfc14dd3","Type":"ContainerStarted","Data":"840b906560f3ce6517d53d91d1c3bfaa94b9dc61730e58801062a34fd4672707"} Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.094965 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" event={"ID":"d9c398eb-293d-47e1-9f0d-2ce33fd8878f","Type":"ContainerStarted","Data":"c4db747fd3cf13ba3f9198cb1d25daca5cd7a765624b48d81ea21292b76f8022"} Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.100028 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" event={"ID":"937d6dd8-25fe-4346-80f5-345f3f772ed9","Type":"ContainerStarted","Data":"243931bb44ca72a32e3e7210a36e4da25affc3ad14ad03ced8d984bcd04a5da5"} Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.104346 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" podUID="937d6dd8-25fe-4346-80f5-345f3f772ed9" Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.104453 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" podUID="d9c398eb-293d-47e1-9f0d-2ce33fd8878f" Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.104516 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/neutron-operator@sha256:b57d65d2a968705b9067192a7cb33bd4a12489db87e1d05de78c076f2062cab4\\\"\"" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" podUID="40fc8f9a-303c-4264-ac77-448100591967" Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.252757 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.253085 5102 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.253343 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert podName:8661bd2c-86be-46fd-95d7-df60f1736855 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:06.253308041 +0000 UTC m=+1077.073657016 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert") pod "infra-operator-controller-manager-54ccf4f85d-lwblf" (UID: "8661bd2c-86be-46fd-95d7-df60f1736855") : secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:02 crc kubenswrapper[5102]: I0123 07:12:02.659496 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.660010 5102 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:02 crc kubenswrapper[5102]: E0123 07:12:02.660161 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert podName:a22f5f2d-ed47-4190-84a2-5243a6479598 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:06.660127819 +0000 UTC m=+1077.480476784 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert") pod "openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" (UID: "a22f5f2d-ed47-4190-84a2-5243a6479598") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114040 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/ovn-operator@sha256:8b3bfb9e86618b7ac69443939b0968fae28a22cd62ea1e429b599ff9f8a5f8cf\\\"\"" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" podUID="8d146faa-8342-4adc-8e6d-37018df6873f" Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114047 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"operator\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/rabbitmq-cluster-operator@sha256:893e66303c1b0bc1d00a299a3f0380bad55c8dc813c8a1c6a4aab379f5aa12a2\\\"\"" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" podUID="937d6dd8-25fe-4346-80f5-345f3f772ed9" Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114125 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/telemetry-operator@sha256:e02722d7581bfe1c5fc13e2fa6811d8665102ba86635c77547abf6b933cde127\\\"\"" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" podUID="d9c398eb-293d-47e1-9f0d-2ce33fd8878f" Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114383 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/test-operator@sha256:c8dde42dafd41026ed2e4cfc26efc0fff63c4ba9d31326ae7dc644ccceaafa9d\\\"\"" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" podUID="fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8" Jan 
23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114427 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/watcher-operator@sha256:2d6d13b3c28e45c6bec980b8808dda8da4723ae87e66d04f53d52c3b3c51612b\\\"\"" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" podUID="f5a445df-fe4d-4323-993e-7d9f20cdd29c" Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.114474 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/swift-operator@sha256:445e951df2f21df6d33a466f75917e0f6103052ae751ae11887136e8ab165922\\\"\"" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" podUID="9f0624d9-4655-424e-bcc9-2e445bb833c7" Jan 23 07:12:03 crc kubenswrapper[5102]: I0123 07:12:03.270502 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:03 crc kubenswrapper[5102]: I0123 07:12:03.270613 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.270833 5102 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.270895 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:07.270873136 +0000 UTC m=+1078.091222111 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "metrics-server-cert" not found Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.271286 5102 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 07:12:03 crc kubenswrapper[5102]: E0123 07:12:03.271311 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:07.27130312 +0000 UTC m=+1078.091652095 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:12:06 crc kubenswrapper[5102]: I0123 07:12:06.347128 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:06 crc kubenswrapper[5102]: E0123 07:12:06.347313 5102 secret.go:188] Couldn't get secret openstack-operators/infra-operator-webhook-server-cert: secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:06 crc kubenswrapper[5102]: E0123 07:12:06.347398 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert podName:8661bd2c-86be-46fd-95d7-df60f1736855 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:14.347377302 +0000 UTC m=+1085.167726277 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert") pod "infra-operator-controller-manager-54ccf4f85d-lwblf" (UID: "8661bd2c-86be-46fd-95d7-df60f1736855") : secret "infra-operator-webhook-server-cert" not found Jan 23 07:12:06 crc kubenswrapper[5102]: I0123 07:12:06.755506 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:06 crc kubenswrapper[5102]: E0123 07:12:06.755737 5102 secret.go:188] Couldn't get secret openstack-operators/openstack-baremetal-operator-webhook-server-cert: secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:06 crc kubenswrapper[5102]: E0123 07:12:06.755859 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert podName:a22f5f2d-ed47-4190-84a2-5243a6479598 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:14.755832513 +0000 UTC m=+1085.576181488 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "cert" (UniqueName: "kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert") pod "openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" (UID: "a22f5f2d-ed47-4190-84a2-5243a6479598") : secret "openstack-baremetal-operator-webhook-server-cert" not found Jan 23 07:12:07 crc kubenswrapper[5102]: I0123 07:12:07.367441 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:07 crc kubenswrapper[5102]: I0123 07:12:07.367560 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:07 crc kubenswrapper[5102]: E0123 07:12:07.367663 5102 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 07:12:07 crc kubenswrapper[5102]: E0123 07:12:07.367757 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:15.367733246 +0000 UTC m=+1086.188082221 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:12:07 crc kubenswrapper[5102]: E0123 07:12:07.367776 5102 secret.go:188] Couldn't get secret openstack-operators/metrics-server-cert: secret "metrics-server-cert" not found Jan 23 07:12:07 crc kubenswrapper[5102]: E0123 07:12:07.367847 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:15.367826859 +0000 UTC m=+1086.188175824 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "metrics-server-cert" not found Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.440394 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.447561 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/8661bd2c-86be-46fd-95d7-df60f1736855-cert\") pod \"infra-operator-controller-manager-54ccf4f85d-lwblf\" (UID: \"8661bd2c-86be-46fd-95d7-df60f1736855\") " pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.540453 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"infra-operator-controller-manager-dockercfg-8q4bf" Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.547175 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.848001 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:14 crc kubenswrapper[5102]: I0123 07:12:14.857394 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/a22f5f2d-ed47-4190-84a2-5243a6479598-cert\") pod \"openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc\" (UID: \"a22f5f2d-ed47-4190-84a2-5243a6479598\") " pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:15 crc kubenswrapper[5102]: I0123 07:12:15.091666 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-baremetal-operator-controller-manager-dockercfg-rs8sr" Jan 23 07:12:15 crc kubenswrapper[5102]: I0123 07:12:15.099713 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:15 crc kubenswrapper[5102]: I0123 07:12:15.461251 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:15 crc kubenswrapper[5102]: I0123 07:12:15.463199 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:15 crc kubenswrapper[5102]: E0123 07:12:15.463349 5102 secret.go:188] Couldn't get secret openstack-operators/webhook-server-cert: secret "webhook-server-cert" not found Jan 23 07:12:15 crc kubenswrapper[5102]: E0123 07:12:15.463452 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs podName:0f5bd4e4-b7d3-45ff-9efb-e2b55f546039 nodeName:}" failed. No retries permitted until 2026-01-23 07:12:31.463428501 +0000 UTC m=+1102.283777476 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "webhook-certs" (UniqueName: "kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs") pod "openstack-operator-controller-manager-57c46955cf-k49t5" (UID: "0f5bd4e4-b7d3-45ff-9efb-e2b55f546039") : secret "webhook-server-cert" not found Jan 23 07:12:15 crc kubenswrapper[5102]: I0123 07:12:15.467390 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-metrics-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:16 crc kubenswrapper[5102]: E0123 07:12:16.587845 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349" Jan 23 07:12:16 crc kubenswrapper[5102]: E0123 07:12:16.588125 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 --metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-xnn4z,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod keystone-operator-controller-manager-b8b6d4659-52bz9_openstack-operators(9a8349c3-c1d1-4549-9a7f-67755c04328f): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:12:16 crc kubenswrapper[5102]: E0123 07:12:16.589379 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" podUID="9a8349c3-c1d1-4549-9a7f-67755c04328f" Jan 23 07:12:17 crc kubenswrapper[5102]: E0123 07:12:17.225891 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/keystone-operator@sha256:8e340ff11922b38e811261de96982e1aff5f4eb8f225d1d9f5973025a4fe8349\\\"\"" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" podUID="9a8349c3-c1d1-4549-9a7f-67755c04328f" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.329627 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.330786 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-48pqq,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod heat-operator-controller-manager-594c8c9d5d-mjxxp_openstack-operators(9c38e15e-2ddd-473c-892a-59aa9978e12c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.332065 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" podUID="9c38e15e-2ddd-473c-892a-59aa9978e12c" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.367531 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.367992 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:manager,Image:quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831,Command:[/manager],Args:[--leader-elect --health-probe-bind-address=:8081 
--metrics-bind-address=127.0.0.1:8080],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:LEASE_DURATION,Value:30,ValueFrom:nil,},EnvVar{Name:RENEW_DEADLINE,Value:20,ValueFrom:nil,},EnvVar{Name:RETRY_PERIOD,Value:5,ValueFrom:nil,},EnvVar{Name:ENABLE_WEBHOOKS,Value:false,ValueFrom:nil,},EnvVar{Name:METRICS_CERTS,Value:false,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{500 -3} {} 500m DecimalSI},memory: {{536870912 0} {} BinarySI},},Requests:ResourceList{cpu: {{10 -3} {} 10m DecimalSI},memory: {{268435456 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-4jzs8,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/healthz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:15,TimeoutSeconds:1,PeriodSeconds:20,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:&HTTPGetAction{Path:/readyz,Port:{0 8081 },Host:,Scheme:HTTP,HTTPHeaders:[]HTTPHeader{},},TCPSocket:nil,GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod nova-operator-controller-manager-6b8bc8d87d-p4czx_openstack-operators(750c537e-8dea-47b4-883c-c30a44e5f48c): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:12:27 crc kubenswrapper[5102]: E0123 07:12:27.369183 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" podUID="750c537e-8dea-47b4-883c-c30a44e5f48c" Jan 23 07:12:27 crc kubenswrapper[5102]: I0123 07:12:27.871274 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc"] Jan 23 07:12:27 crc kubenswrapper[5102]: I0123 07:12:27.930859 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf"] Jan 23 07:12:28 crc kubenswrapper[5102]: E0123 07:12:28.330712 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/nova-operator@sha256:4e995cfa360a9d595a01b9c0541ab934692f2374203cb5738127dd784f793831\\\"\"" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" podUID="750c537e-8dea-47b4-883c-c30a44e5f48c" Jan 23 07:12:28 crc kubenswrapper[5102]: E0123 
07:12:28.331222 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/openstack-k8s-operators/heat-operator@sha256:2f9a2f064448faebbae58f52d564dc0e8e39bed0fc12bd6b9fe925e42f1b5492\\\"\"" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" podUID="9c38e15e-2ddd-473c-892a-59aa9978e12c" Jan 23 07:12:28 crc kubenswrapper[5102]: W0123 07:12:28.507720 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda22f5f2d_ed47_4190_84a2_5243a6479598.slice/crio-e808b65475b6995938772cba7893b101a2051b54f043947bff296f6c88c8fb67 WatchSource:0}: Error finding container e808b65475b6995938772cba7893b101a2051b54f043947bff296f6c88c8fb67: Status 404 returned error can't find the container with id e808b65475b6995938772cba7893b101a2051b54f043947bff296f6c88c8fb67 Jan 23 07:12:28 crc kubenswrapper[5102]: W0123 07:12:28.511023 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8661bd2c_86be_46fd_95d7_df60f1736855.slice/crio-4079d2d2b2f9d56d86d9109f7f22dbfd6b9822a3d99fea0f8009adcb132f8b8c WatchSource:0}: Error finding container 4079d2d2b2f9d56d86d9109f7f22dbfd6b9822a3d99fea0f8009adcb132f8b8c: Status 404 returned error can't find the container with id 4079d2d2b2f9d56d86d9109f7f22dbfd6b9822a3d99fea0f8009adcb132f8b8c Jan 23 07:12:29 crc kubenswrapper[5102]: I0123 07:12:29.338389 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" event={"ID":"a22f5f2d-ed47-4190-84a2-5243a6479598","Type":"ContainerStarted","Data":"e808b65475b6995938772cba7893b101a2051b54f043947bff296f6c88c8fb67"} Jan 23 07:12:29 crc kubenswrapper[5102]: I0123 07:12:29.340069 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" event={"ID":"8661bd2c-86be-46fd-95d7-df60f1736855","Type":"ContainerStarted","Data":"4079d2d2b2f9d56d86d9109f7f22dbfd6b9822a3d99fea0f8009adcb132f8b8c"} Jan 23 07:12:30 crc kubenswrapper[5102]: I0123 07:12:30.354352 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" event={"ID":"1ae0c44b-b391-4df4-8246-b0e24f649e8b","Type":"ContainerStarted","Data":"cf665b2eeb1f56009151e15a7fa90dc062f7734c53499cb9e26a045b91734efe"} Jan 23 07:12:30 crc kubenswrapper[5102]: I0123 07:12:30.355024 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:12:30 crc kubenswrapper[5102]: I0123 07:12:30.383397 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" podStartSLOduration=4.262056466 podStartE2EDuration="32.383366192s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:11:59.275114689 +0000 UTC m=+1070.095463664" lastFinishedPulling="2026-01-23 07:12:27.396424375 +0000 UTC m=+1098.216773390" observedRunningTime="2026-01-23 07:12:30.379393326 +0000 UTC m=+1101.199742301" watchObservedRunningTime="2026-01-23 07:12:30.383366192 +0000 UTC m=+1101.203715187" Jan 23 07:12:31 crc kubenswrapper[5102]: I0123 07:12:31.507197 5102 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:31 crc kubenswrapper[5102]: I0123 07:12:31.516072 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/0f5bd4e4-b7d3-45ff-9efb-e2b55f546039-webhook-certs\") pod \"openstack-operator-controller-manager-57c46955cf-k49t5\" (UID: \"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039\") " pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:31 crc kubenswrapper[5102]: I0123 07:12:31.628935 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack-operators"/"openstack-operator-controller-manager-dockercfg-g2zcp" Jan 23 07:12:31 crc kubenswrapper[5102]: I0123 07:12:31.636132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.408491 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" event={"ID":"529e16f1-1e4b-4ba2-8855-e8a445d0c63f","Type":"ContainerStarted","Data":"55c6e216c544b7aea215314d4fb9fd19c31dacec464c9158c3790664f555ed1a"} Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.409259 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.419581 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" event={"ID":"d31fc107-8403-4c08-9058-483dafc58c60","Type":"ContainerStarted","Data":"77d382748a1bbbab69e363e73951be86a16990ea438e839f83e4315b0217d89b"} Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.420032 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.426123 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" event={"ID":"52d07fd1-692c-461a-baf4-51d4af679796","Type":"ContainerStarted","Data":"340cbb7791d87e73d0b2549e06d6aa5d5ddb8d1ddf3cc0512ffd97aa494698ba"} Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.426338 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.438414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" event={"ID":"51b93e08-8c79-4ccc-b4d4-c5d54e095284","Type":"ContainerStarted","Data":"75c039ab9f9be22c267eb31e9b11c032f56ad8ff583dc9ceb6872c81621f97e2"} Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.438573 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.441281 5102 pod_startup_latency_tracker.go:104] "Observed pod startup 
duration" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" podStartSLOduration=10.976829371000001 podStartE2EDuration="37.441251913s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.937207036 +0000 UTC m=+1071.757556011" lastFinishedPulling="2026-01-23 07:12:27.401629558 +0000 UTC m=+1098.221978553" observedRunningTime="2026-01-23 07:12:35.433476476 +0000 UTC m=+1106.253825451" watchObservedRunningTime="2026-01-23 07:12:35.441251913 +0000 UTC m=+1106.261600888" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.457356 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" podStartSLOduration=10.870515455 podStartE2EDuration="37.457336652s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.818375504 +0000 UTC m=+1071.638724479" lastFinishedPulling="2026-01-23 07:12:27.405196701 +0000 UTC m=+1098.225545676" observedRunningTime="2026-01-23 07:12:35.451862136 +0000 UTC m=+1106.272211131" watchObservedRunningTime="2026-01-23 07:12:35.457336652 +0000 UTC m=+1106.277685627" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.485014 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" podStartSLOduration=10.872191972 podStartE2EDuration="37.484990266s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.786868253 +0000 UTC m=+1071.607217228" lastFinishedPulling="2026-01-23 07:12:27.399666547 +0000 UTC m=+1098.220015522" observedRunningTime="2026-01-23 07:12:35.46971256 +0000 UTC m=+1106.290061545" watchObservedRunningTime="2026-01-23 07:12:35.484990266 +0000 UTC m=+1106.305339241" Jan 23 07:12:35 crc kubenswrapper[5102]: I0123 07:12:35.490689 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" podStartSLOduration=10.656730794 podStartE2EDuration="37.490663049s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.57934229 +0000 UTC m=+1071.399691265" lastFinishedPulling="2026-01-23 07:12:27.413274545 +0000 UTC m=+1098.233623520" observedRunningTime="2026-01-23 07:12:35.486435609 +0000 UTC m=+1106.306784594" watchObservedRunningTime="2026-01-23 07:12:35.490663049 +0000 UTC m=+1106.311012044" Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.451141 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" event={"ID":"73336c80-3616-4716-9ecf-cfe3f2114c4a","Type":"ContainerStarted","Data":"22844f2f43d9a7692676ea734bdafd70088d41d8529bd6ba067f99ce8c085157"} Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.451564 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.458050 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" event={"ID":"eab33781-ceb7-4c8b-8df9-55ca5ab33f17","Type":"ContainerStarted","Data":"bcbcc66ce2b8b0a5c9d654ac893c48e43d9c29ec611ad62e76588fd7cf4af0dc"} Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.458103 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.486017 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" podStartSLOduration=11.906275942 podStartE2EDuration="38.485992804s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.818025774 +0000 UTC m=+1071.638374749" lastFinishedPulling="2026-01-23 07:12:27.397742596 +0000 UTC m=+1098.218091611" observedRunningTime="2026-01-23 07:12:36.470300557 +0000 UTC m=+1107.290649542" watchObservedRunningTime="2026-01-23 07:12:36.485992804 +0000 UTC m=+1107.306341789" Jan 23 07:12:36 crc kubenswrapper[5102]: I0123 07:12:36.487570 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" podStartSLOduration=11.877868376 podStartE2EDuration="38.487559052s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.787294537 +0000 UTC m=+1071.607643512" lastFinishedPulling="2026-01-23 07:12:27.396985173 +0000 UTC m=+1098.217334188" observedRunningTime="2026-01-23 07:12:36.484322044 +0000 UTC m=+1107.304671029" watchObservedRunningTime="2026-01-23 07:12:36.487559052 +0000 UTC m=+1107.307908037" Jan 23 07:12:37 crc kubenswrapper[5102]: I0123 07:12:37.707236 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5"] Jan 23 07:12:37 crc kubenswrapper[5102]: W0123 07:12:37.740122 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f5bd4e4_b7d3_45ff_9efb_e2b55f546039.slice/crio-4cbe348e330be75bcd16d74f8e61ae0318de5434b9f9dc3fb2ee943201cb8fe1 WatchSource:0}: Error finding container 4cbe348e330be75bcd16d74f8e61ae0318de5434b9f9dc3fb2ee943201cb8fe1: Status 404 returned error can't find the container with id 4cbe348e330be75bcd16d74f8e61ae0318de5434b9f9dc3fb2ee943201cb8fe1 Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.485469 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/barbican-operator-controller-manager-59dd8b7cbf-272zl" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.486485 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" event={"ID":"8661bd2c-86be-46fd-95d7-df60f1736855","Type":"ContainerStarted","Data":"1796991783bfb1e538504930986925a165ad288c0d477b738a88ddcd2997c1a2"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.487153 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.489317 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" event={"ID":"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039","Type":"ContainerStarted","Data":"fc55b32ad809a665beb5b326d4c68e6d0ad407d76cedc64243ac7acb59938b9a"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.489351 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" 
event={"ID":"0f5bd4e4-b7d3-45ff-9efb-e2b55f546039","Type":"ContainerStarted","Data":"4cbe348e330be75bcd16d74f8e61ae0318de5434b9f9dc3fb2ee943201cb8fe1"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.489797 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.491187 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" event={"ID":"8d146faa-8342-4adc-8e6d-37018df6873f","Type":"ContainerStarted","Data":"2e659e994c1974f4ba710e29cee6ec489c2a558781e13988ede623c7f1cbfb29"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.491601 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.493600 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" event={"ID":"ff029305-9cf1-451a-b5b8-ff55bfc14dd3","Type":"ContainerStarted","Data":"fca74bd81e9c97f66f577bdc064d6633fcd59c4e91f4e3df79779e3be6c218c1"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.494378 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.495895 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" event={"ID":"80047622-db1e-4345-b2aa-e44f716fe6ad","Type":"ContainerStarted","Data":"3fe19da8fb91e39eacd3963c17e4703cfe9814d44f27f9c3bbcda3711668685a"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.496256 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.497911 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" event={"ID":"9a8349c3-c1d1-4549-9a7f-67755c04328f","Type":"ContainerStarted","Data":"14aa67737b0c0cb6b70b121d27a89991d50574d4a630e56e0e23649e10a1d55d"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.498577 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.500519 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" event={"ID":"d9c398eb-293d-47e1-9f0d-2ce33fd8878f","Type":"ContainerStarted","Data":"47842643793d4273c4c7d328927d3b5111adf8eacb6a92b5911f63f2db6c227b"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.500904 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.502328 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" event={"ID":"937d6dd8-25fe-4346-80f5-345f3f772ed9","Type":"ContainerStarted","Data":"ba839dcbda7bfb57c6eb41a5e1ceacad3644a0c6abcd2c4ec716307f69300bc1"} Jan 23 07:12:38 crc kubenswrapper[5102]: 
I0123 07:12:38.506158 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" event={"ID":"9f0624d9-4655-424e-bcc9-2e445bb833c7","Type":"ContainerStarted","Data":"c463d9b71ef68f8b1e700fa8e234927f62df75194dea6b0cb2d70781b9cf882c"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.506555 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.507876 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" event={"ID":"fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8","Type":"ContainerStarted","Data":"3429f4a6f05f91740f93204519afcdacb1270bf970626e53bf683b2a1fdd230e"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.508337 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.509985 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" event={"ID":"4027b29c-39eb-4b48-b17d-64c6587dc3fb","Type":"ContainerStarted","Data":"08fdf0789665be7833bb5c9e19f46e306adf5dd20c39e528f536e740be8a6b6f"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.510570 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.512145 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" event={"ID":"40fc8f9a-303c-4264-ac77-448100591967","Type":"ContainerStarted","Data":"cf121301b5bae080f59019b14a74d52e0b248b7bea4e24416dc502c0a69097c7"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.512630 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.514902 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" event={"ID":"f5a445df-fe4d-4323-993e-7d9f20cdd29c","Type":"ContainerStarted","Data":"70ba36081ffa9c50aca587c6b049b19aca6f7a0b2b3ee6d3fc0948eae84dfc44"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.515438 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.530464 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" event={"ID":"a22f5f2d-ed47-4190-84a2-5243a6479598","Type":"ContainerStarted","Data":"1f093ceba6f1756936cc72e852b5b4392071b8fe2b13f9cca52fbe6a8118a45a"} Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.531319 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.556445 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" 
podStartSLOduration=14.088526866 podStartE2EDuration="40.556418908s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.937306199 +0000 UTC m=+1071.757655174" lastFinishedPulling="2026-01-23 07:12:27.405198241 +0000 UTC m=+1098.225547216" observedRunningTime="2026-01-23 07:12:38.556129699 +0000 UTC m=+1109.376478674" watchObservedRunningTime="2026-01-23 07:12:38.556418908 +0000 UTC m=+1109.376767883" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.602657 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" podStartSLOduration=4.428683182 podStartE2EDuration="40.602634926s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.067125787 +0000 UTC m=+1071.887474762" lastFinishedPulling="2026-01-23 07:12:37.241077521 +0000 UTC m=+1108.061426506" observedRunningTime="2026-01-23 07:12:38.598523 +0000 UTC m=+1109.418871975" watchObservedRunningTime="2026-01-23 07:12:38.602634926 +0000 UTC m=+1109.422983901" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.610822 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.655292 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" podStartSLOduration=4.39189558 podStartE2EDuration="40.65527405s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.977903457 +0000 UTC m=+1071.798252432" lastFinishedPulling="2026-01-23 07:12:37.241281917 +0000 UTC m=+1108.061630902" observedRunningTime="2026-01-23 07:12:38.643034477 +0000 UTC m=+1109.463383452" watchObservedRunningTime="2026-01-23 07:12:38.65527405 +0000 UTC m=+1109.475623025" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.707733 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" podStartSLOduration=4.493227557 podStartE2EDuration="40.707702297s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.026610301 +0000 UTC m=+1071.846959276" lastFinishedPulling="2026-01-23 07:12:37.241085041 +0000 UTC m=+1108.061434016" observedRunningTime="2026-01-23 07:12:38.669898215 +0000 UTC m=+1109.490247190" watchObservedRunningTime="2026-01-23 07:12:38.707702297 +0000 UTC m=+1109.528051272" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.740929 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" podStartSLOduration=4.172285324 podStartE2EDuration="40.740904909s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.805734277 +0000 UTC m=+1071.626083252" lastFinishedPulling="2026-01-23 07:12:37.374353862 +0000 UTC m=+1108.194702837" observedRunningTime="2026-01-23 07:12:38.738260189 +0000 UTC m=+1109.558609164" watchObservedRunningTime="2026-01-23 07:12:38.740904909 +0000 UTC m=+1109.561253884" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.745929 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/rabbitmq-cluster-operator-manager-668c99d594-cdzmd" podStartSLOduration=3.44381447 podStartE2EDuration="39.745915032s" podCreationTimestamp="2026-01-23 
07:11:59 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.054759847 +0000 UTC m=+1071.875108822" lastFinishedPulling="2026-01-23 07:12:37.356860409 +0000 UTC m=+1108.177209384" observedRunningTime="2026-01-23 07:12:38.701704885 +0000 UTC m=+1109.522053860" watchObservedRunningTime="2026-01-23 07:12:38.745915032 +0000 UTC m=+1109.566264007" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.769323 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" podStartSLOduration=4.605374234 podStartE2EDuration="40.769297893s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.067365944 +0000 UTC m=+1071.887714919" lastFinishedPulling="2026-01-23 07:12:37.231289593 +0000 UTC m=+1108.051638578" observedRunningTime="2026-01-23 07:12:38.763951581 +0000 UTC m=+1109.584300556" watchObservedRunningTime="2026-01-23 07:12:38.769297893 +0000 UTC m=+1109.589646868" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.800166 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" podStartSLOduration=31.984454912 podStartE2EDuration="40.800131063s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:28.535139414 +0000 UTC m=+1099.355488389" lastFinishedPulling="2026-01-23 07:12:37.350815555 +0000 UTC m=+1108.171164540" observedRunningTime="2026-01-23 07:12:38.798993729 +0000 UTC m=+1109.619342704" watchObservedRunningTime="2026-01-23 07:12:38.800131063 +0000 UTC m=+1109.620480038" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.828698 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" podStartSLOduration=14.201649212 podStartE2EDuration="40.828673323s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.786248434 +0000 UTC m=+1071.606597399" lastFinishedPulling="2026-01-23 07:12:27.413272535 +0000 UTC m=+1098.233621510" observedRunningTime="2026-01-23 07:12:38.827636542 +0000 UTC m=+1109.647985527" watchObservedRunningTime="2026-01-23 07:12:38.828673323 +0000 UTC m=+1109.649022298" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.897342 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" podStartSLOduration=40.897316925 podStartE2EDuration="40.897316925s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:12:38.892441795 +0000 UTC m=+1109.712790770" watchObservedRunningTime="2026-01-23 07:12:38.897316925 +0000 UTC m=+1109.717665900" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.927011 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" podStartSLOduration=14.544769815 podStartE2EDuration="40.926994629s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.015128789 +0000 UTC m=+1071.835477764" lastFinishedPulling="2026-01-23 07:12:27.397353603 +0000 UTC m=+1098.217702578" observedRunningTime="2026-01-23 07:12:38.923318996 +0000 UTC m=+1109.743667971" watchObservedRunningTime="2026-01-23 
07:12:38.926994629 +0000 UTC m=+1109.747343604" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.958958 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" podStartSLOduration=5.9357231200000005 podStartE2EDuration="40.958936922s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.028644865 +0000 UTC m=+1071.848993840" lastFinishedPulling="2026-01-23 07:12:36.051858667 +0000 UTC m=+1106.872207642" observedRunningTime="2026-01-23 07:12:38.955822447 +0000 UTC m=+1109.776171422" watchObservedRunningTime="2026-01-23 07:12:38.958936922 +0000 UTC m=+1109.779285887" Jan 23 07:12:38 crc kubenswrapper[5102]: I0123 07:12:38.992604 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" podStartSLOduration=4.826222132 podStartE2EDuration="40.992578537s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:01.074898951 +0000 UTC m=+1071.895247926" lastFinishedPulling="2026-01-23 07:12:37.241255356 +0000 UTC m=+1108.061604331" observedRunningTime="2026-01-23 07:12:38.98578688 +0000 UTC m=+1109.806135855" watchObservedRunningTime="2026-01-23 07:12:38.992578537 +0000 UTC m=+1109.812927512" Jan 23 07:12:39 crc kubenswrapper[5102]: I0123 07:12:39.061043 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" podStartSLOduration=32.224011073 podStartE2EDuration="41.061019862s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:28.537009093 +0000 UTC m=+1099.357358068" lastFinishedPulling="2026-01-23 07:12:37.374017882 +0000 UTC m=+1108.194366857" observedRunningTime="2026-01-23 07:12:39.056499275 +0000 UTC m=+1109.876848250" watchObservedRunningTime="2026-01-23 07:12:39.061019862 +0000 UTC m=+1109.881368837" Jan 23 07:12:39 crc kubenswrapper[5102]: I0123 07:12:39.570023 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" event={"ID":"750c537e-8dea-47b4-883c-c30a44e5f48c","Type":"ContainerStarted","Data":"2f051e6607761323ac95477666ba7365b0ee5a178aed41ae38a2d6f0e4c694d3"} Jan 23 07:12:39 crc kubenswrapper[5102]: I0123 07:12:39.581624 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:12:39 crc kubenswrapper[5102]: I0123 07:12:39.598644 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" podStartSLOduration=3.43649257 podStartE2EDuration="41.598612922s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.937819816 +0000 UTC m=+1071.758168791" lastFinishedPulling="2026-01-23 07:12:39.099940168 +0000 UTC m=+1109.920289143" observedRunningTime="2026-01-23 07:12:39.587466922 +0000 UTC m=+1110.407815897" watchObservedRunningTime="2026-01-23 07:12:39.598612922 +0000 UTC m=+1110.418961897" Jan 23 07:12:43 crc kubenswrapper[5102]: I0123 07:12:43.815781 5102 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-zvcbn container/openshift-config-operator namespace/openshift-config-operator: Liveness probe status=failure output="Get 
\"https://10.217.0.35:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 07:12:43 crc kubenswrapper[5102]: I0123 07:12:43.816387 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" podUID="4f3ef19c-f82b-444d-9133-364448e010c2" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:12:43 crc kubenswrapper[5102]: I0123 07:12:43.815906 5102 patch_prober.go:28] interesting pod/openshift-config-operator-7777fb866f-zvcbn container/openshift-config-operator namespace/openshift-config-operator: Readiness probe status=failure output="Get \"https://10.217.0.35:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Jan 23 07:12:43 crc kubenswrapper[5102]: I0123 07:12:43.816554 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zvcbn" podUID="4f3ef19c-f82b-444d-9133-364448e010c2" containerName="openshift-config-operator" probeResult="failure" output="Get \"https://10.217.0.35:8443/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:12:44 crc kubenswrapper[5102]: I0123 07:12:44.715764 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/infra-operator-controller-manager-54ccf4f85d-lwblf" Jan 23 07:12:45 crc kubenswrapper[5102]: I0123 07:12:45.110417 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc" Jan 23 07:12:45 crc kubenswrapper[5102]: I0123 07:12:45.755373 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" event={"ID":"9c38e15e-2ddd-473c-892a-59aa9978e12c","Type":"ContainerStarted","Data":"b6d4c2e67341b8fe45d4f386828f72122ebae66ffcb86a59f973ef7362dbfd48"} Jan 23 07:12:45 crc kubenswrapper[5102]: I0123 07:12:45.756364 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:12:45 crc kubenswrapper[5102]: I0123 07:12:45.785479 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" podStartSLOduration=3.596684981 podStartE2EDuration="47.785447305s" podCreationTimestamp="2026-01-23 07:11:58 +0000 UTC" firstStartedPulling="2026-01-23 07:12:00.811379335 +0000 UTC m=+1071.631728310" lastFinishedPulling="2026-01-23 07:12:45.000141629 +0000 UTC m=+1115.820490634" observedRunningTime="2026-01-23 07:12:45.779833744 +0000 UTC m=+1116.600182739" watchObservedRunningTime="2026-01-23 07:12:45.785447305 +0000 UTC m=+1116.605796280" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.499634 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/cinder-operator-controller-manager-69cf5d4557-wb6dk" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.549301 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/designate-operator-controller-manager-b45d7bf98-mth4h" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.584975 5102 kubelet.go:2542] 
"SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/glance-operator-controller-manager-78fdd796fd-n79vx" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.634582 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/horizon-operator-controller-manager-77d5c5b54f-2cmr7" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.694765 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ironic-operator-controller-manager-69d6c9f5b8-wcdgn" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.938689 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/keystone-operator-controller-manager-b8b6d4659-52bz9" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.960441 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/manila-operator-controller-manager-78c6999f6f-rs5gj" Jan 23 07:12:48 crc kubenswrapper[5102]: I0123 07:12:48.977873 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/mariadb-operator-controller-manager-c87fff755-j6bsb" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.015492 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/neutron-operator-controller-manager-5d8f59fb49-b78cx" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.041759 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/nova-operator-controller-manager-6b8bc8d87d-p4czx" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.096603 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/octavia-operator-controller-manager-7bd9774b6-nwpwk" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.258256 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/ovn-operator-controller-manager-55db956ddc-bmq5z" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.442695 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/telemetry-operator-controller-manager-85cd9769bb-7nxxd" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.472169 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/placement-operator-controller-manager-5d646b7d76-j6qxz" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.535804 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/swift-operator-controller-manager-547cbdb99f-6js2g" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.671359 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/test-operator-controller-manager-69797bbcbd-jhfvr" Jan 23 07:12:49 crc kubenswrapper[5102]: I0123 07:12:49.730754 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/watcher-operator-controller-manager-5ffb9c6597-hxkkl" Jan 23 07:12:51 crc kubenswrapper[5102]: I0123 07:12:51.646289 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack-operators/openstack-operator-controller-manager-57c46955cf-k49t5" Jan 23 07:12:58 crc kubenswrapper[5102]: I0123 07:12:58.608500 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openstack-operators/heat-operator-controller-manager-594c8c9d5d-mjxxp" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.164504 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"] Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.166884 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.172284 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"kube-root-ca.crt" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.172495 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"dnsmasq-dns-dockercfg-29wn9" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.172646 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.172833 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openshift-service-ca.crt" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.184177 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.184256 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v954c\" (UniqueName: \"kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.189079 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"] Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.219765 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"] Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.221384 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.231852 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-svc"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.240232 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"]
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.285500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.285750 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v954c\" (UniqueName: \"kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.286888 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.311070 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v954c\" (UniqueName: \"kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c\") pod \"dnsmasq-dns-84bb9d8bd9-hfzdr\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.387911 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ngw5h\" (UniqueName: \"kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.388277 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.388497 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.490444 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.490522 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.490879 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ngw5h\" (UniqueName: \"kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.491438 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.491464 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.510447 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ngw5h\" (UniqueName: \"kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h\") pod \"dnsmasq-dns-5f854695bc-xxrjx\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.549782 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.563780 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx"
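The interleaved reconciler_common.go and operation_generator.go entries above trace the volume manager's fixed per-volume sequence: VerifyControllerAttachedVolume (the volume is confirmed in desired state), then "MountVolume started", then "MountVolume.SetUp succeeded", after which the pod's sandbox can be created. A compressed, illustrative Go sketch of that ordering; the helper names are hypothetical stand-ins, the real code lives in kubelet's volumemanager reconciler and operation executor:

```go
// Illustrative only: a compressed view of the per-volume ordering visible in
// the log above. These functions are hypothetical, not kubelet APIs.
package main

import "fmt"

type volume struct{ name, pod string }

func reconcile(vols []volume) {
	for _, v := range vols {
		// Stage 1 (reconciler_common.go:245): confirm the volume is attached/expected.
		fmt.Printf("VerifyControllerAttachedVolume started for volume %q pod %q\n", v.name, v.pod)
	}
	for _, v := range vols {
		// Stage 2 (reconciler_common.go:218): begin mounting into the pod's volume dir.
		fmt.Printf("MountVolume started for volume %q pod %q\n", v.name, v.pod)
		// Stage 3 (operation_generator.go:637): mount finished; contents visible to the pod.
		fmt.Printf("MountVolume.SetUp succeeded for volume %q pod %q\n", v.name, v.pod)
	}
}

func main() {
	reconcile([]volume{
		{"config", "dnsmasq-dns-5f854695bc-xxrjx"},
		{"dns-svc", "dnsmasq-dns-5f854695bc-xxrjx"},
		{"kube-api-access-ngw5h", "dnsmasq-dns-5f854695bc-xxrjx"},
	})
}
```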
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.841978 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"]
Jan 23 07:13:16 crc kubenswrapper[5102]: I0123 07:13:16.912922 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"]
Jan 23 07:13:17 crc kubenswrapper[5102]: I0123 07:13:17.045592 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" event={"ID":"bc5fa4f3-0e35-4351-be5e-0c662e042eb4","Type":"ContainerStarted","Data":"3f3f8a89901defda8287ec8490fd7e6135a67e96bac9214314fdfd65e0422766"}
Jan 23 07:13:17 crc kubenswrapper[5102]: I0123 07:13:17.048149 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx" event={"ID":"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809","Type":"ContainerStarted","Data":"d2c97277e57fd051b396150d90236183180ad2a6a5cbdc567c5f5f22fb88793e"}
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.180878 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"]
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.218234 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"]
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.219870 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.236119 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"]
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.322049 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.322110 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-txx4q\" (UniqueName: \"kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.322193 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.423918 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.424028 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.424067 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-txx4q\" (UniqueName: \"kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.425477 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.425517 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.449066 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-txx4q\" (UniqueName: \"kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q\") pod \"dnsmasq-dns-744ffd65bc-lfcqw\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.545115 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw"
Jan 23 07:13:18 crc kubenswrapper[5102]: W0123 07:13:18.854234 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod38a915fe_37e0_4a5c_9343_9944308b2172.slice/crio-276f6f3c32e8e4901ebe18de2cde66caf57833b4ae9f388f1a19b2188b275a79 WatchSource:0}: Error finding container 276f6f3c32e8e4901ebe18de2cde66caf57833b4ae9f388f1a19b2188b275a79: Status 404 returned error can't find the container with id 276f6f3c32e8e4901ebe18de2cde66caf57833b4ae9f388f1a19b2188b275a79
Jan 23 07:13:18 crc kubenswrapper[5102]: I0123 07:13:18.868402 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"]
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.016287 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"]
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.058462 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"]
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.060500 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.082053 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"]
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.105690 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw" event={"ID":"38a915fe-37e0-4a5c-9343-9944308b2172","Type":"ContainerStarted","Data":"276f6f3c32e8e4901ebe18de2cde66caf57833b4ae9f388f1a19b2188b275a79"}
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.137434 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.137502 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xcl2k\" (UniqueName: \"kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.137549 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.238891 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.238977 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xcl2k\" (UniqueName: \"kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.239011 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.240075 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.240700 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.281847 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xcl2k\" (UniqueName: \"kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k\") pod \"dnsmasq-dns-95f5f6995-scxcj\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.391935 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-scxcj"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.965124 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.966564 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.979770 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-server-conf"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.980084 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-erlang-cookie"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.982352 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-svc"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.982517 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-config-data"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.982646 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-plugins-conf"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.982755 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-default-user"
Jan 23 07:13:19 crc kubenswrapper[5102]: I0123 07:13:19.983947 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-server-dockercfg-8ldmd"
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:19.996920 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.054938 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"]
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060678 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0"
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060754 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0"
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060797 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0"
\"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060833 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060865 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060884 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060902 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060921 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.060942 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qkxwl\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.061077 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.061152 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.119420 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" event={"ID":"6e143893-c456-4bec-8400-d09686165f84","Type":"ContainerStarted","Data":"ba7f0ce7428b819bdcc84b2c674fb7d094c2b9d375f4f5ff42041957d7e9f8d3"} Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.160377 5102 
kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164338 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164426 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164466 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164496 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qkxwl\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164521 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164570 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164611 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164654 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164688 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164728 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" 
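The "SyncLoop (PLEG): event for pod" entries above render a pod lifecycle event from the Pod Lifecycle Event Generator. Note that the first ContainerStarted event a pod logs carries in Data the same hex ID that later appears in the crio- cgroup paths, i.e. the pod sandbox, with per-container events following on subsequent syncs. Roughly the shape being printed, as a sketch modeled on kubelet's pleg.PodLifecycleEvent with field types simplified here for illustration:

```go
package main

import "fmt"

// Sketch of the event rendered in "SyncLoop (PLEG): event for pod" lines,
// modeled on kubelet's pleg.PodLifecycleEvent (types simplified; not the real definition).
type PodLifecycleEvent struct {
	ID   string      // pod UID, e.g. "6e143893-c456-4bec-8400-d09686165f84"
	Type string      // e.g. "ContainerStarted"
	Data interface{} // container ID; for the pod's first event, the sandbox ID
}

func main() {
	ev := PodLifecycleEvent{
		ID:   "6e143893-c456-4bec-8400-d09686165f84",
		Type: "ContainerStarted",
		Data: "ba7f0ce7428b819bdcc84b2c674fb7d094c2b9d375f4f5ff42041957d7e9f8d3",
	}
	fmt.Printf("event=%+v\n", ev)
}
```

That also explains the W-level manager.go:1169 "Status 404" warnings in this log: cadvisor sees the new crio-<sandbox-id> cgroup from a watch event before the runtime can answer queries about that container, a transient race during sandbox creation.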
(UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.164761 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.166345 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.167472 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.172092 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.184892 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.187358 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.188019 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.190514 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") device mount path \"/mnt/openstack/pv01\"" pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.221150 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.225321 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf\") pod 
\"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.230619 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-erlang-cookie" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.230904 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-plugins-conf" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.233702 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.234258 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.236295 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qkxwl\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.236289 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-server-conf" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.236381 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-rabbitmq-cell1-svc" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.237707 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-server-dockercfg-qbzt2" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.237915 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"rabbitmq-cell1-config-data" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.220318 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"rabbitmq-cell1-default-user" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.256600 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267732 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267833 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267861 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage07-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267891 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267948 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.267977 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4gbz9\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.268126 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.268485 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.268598 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.268680 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.268778 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.280021 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage01-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage01-crc\") pod \"rabbitmq-server-0\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.289976 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.371781 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372447 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372698 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372740 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372778 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372799 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372822 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372856 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372880 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage07-crc\" (UniqueName: 
\"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372910 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372944 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.372963 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4gbz9\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.373259 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") device mount path \"/mnt/openstack/pv07\"" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.374372 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.376368 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.377118 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.388011 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.389780 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " 
pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.393438 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.397696 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.398207 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.403156 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4gbz9\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.413785 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"rabbitmq-cell1-server-0\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.597935 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:13:20 crc kubenswrapper[5102]: I0123 07:13:20.739919 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 07:13:20 crc kubenswrapper[5102]: W0123 07:13:20.758770 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4fc3e1d_5fac_4696_a8eb_709db37b5ff6.slice/crio-751d5e8d8d75c2615358a9dfdc133b7afdcfdbfd251107083f8d63ea5c0c976d WatchSource:0}: Error finding container 751d5e8d8d75c2615358a9dfdc133b7afdcfdbfd251107083f8d63ea5c0c976d: Status 404 returned error can't find the container with id 751d5e8d8d75c2615358a9dfdc133b7afdcfdbfd251107083f8d63ea5c0c976d
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.027364 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"]
Jan 23 07:13:21 crc kubenswrapper[5102]: W0123 07:13:21.070400 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1ea732e7_d11d_4e12_9d44_f8fcafa50de5.slice/crio-a4627277241a6e04a36fd1dcf209a91d2523f1fbf8a1c0593cc639bf07bb470a WatchSource:0}: Error finding container a4627277241a6e04a36fd1dcf209a91d2523f1fbf8a1c0593cc639bf07bb470a: Status 404 returned error can't find the container with id a4627277241a6e04a36fd1dcf209a91d2523f1fbf8a1c0593cc639bf07bb470a
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.142710 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerStarted","Data":"a4627277241a6e04a36fd1dcf209a91d2523f1fbf8a1c0593cc639bf07bb470a"}
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.145675 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerStarted","Data":"751d5e8d8d75c2615358a9dfdc133b7afdcfdbfd251107083f8d63ea5c0c976d"}
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.532095 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-galera-0"]
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.534279 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.541502 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-svc"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.543083 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-dockercfg-jwnc4"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.543276 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-scripts"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.543569 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config-data"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.546209 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"]
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.548213 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"combined-ca-bundle"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.602631 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.602706 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.602756 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.602803 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.603219 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxxtl\" (UniqueName: \"kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.603263 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.603326 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0"
\"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.603377 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704319 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxxtl\" (UniqueName: \"kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704382 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704416 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704557 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704597 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704619 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704662 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.704726 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs\") pod \"openstack-galera-0\" (UID: 
\"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.705445 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.706713 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.706867 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.707736 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.709320 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") device mount path \"/mnt/openstack/pv03\"" pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.721896 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.742610 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.754670 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kxxtl\" (UniqueName: \"kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.777725 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"openstack-galera-0\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " pod="openstack/openstack-galera-0" Jan 23 07:13:21 crc kubenswrapper[5102]: I0123 07:13:21.866097 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstack-galera-0" Jan 23 07:13:22 crc kubenswrapper[5102]: I0123 07:13:22.632421 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-galera-0"] Jan 23 07:13:22 crc kubenswrapper[5102]: W0123 07:13:22.646159 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1ede537b_39d8_483c_9a2d_4ace36319060.slice/crio-0614d0b2ed67db0f09f8d59a72622b249e926e0294d6c40016ccf2f83dfbec87 WatchSource:0}: Error finding container 0614d0b2ed67db0f09f8d59a72622b249e926e0294d6c40016ccf2f83dfbec87: Status 404 returned error can't find the container with id 0614d0b2ed67db0f09f8d59a72622b249e926e0294d6c40016ccf2f83dfbec87 Jan 23 07:13:22 crc kubenswrapper[5102]: I0123 07:13:22.971641 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 07:13:22 crc kubenswrapper[5102]: I0123 07:13:22.994879 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:22 crc kubenswrapper[5102]: I0123 07:13:22.999038 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-config-data" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.000465 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"galera-openstack-cell1-dockercfg-smfmv" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.000679 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-galera-openstack-cell1-svc" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.005068 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-cell1-scripts" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.007866 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161584 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161644 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161695 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161730 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " 
pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161766 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tfrrm\" (UniqueName: \"kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161800 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161842 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.161871 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.174501 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/memcached-0"] Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.179653 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.188977 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"memcached-memcached-dockercfg-t7zm8" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.189607 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-memcached-svc" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.189827 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"memcached-config-data" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.190682 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.236156 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerStarted","Data":"0614d0b2ed67db0f09f8d59a72622b249e926e0294d6c40016ccf2f83dfbec87"} Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.263840 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.263904 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.263955 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.263988 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.264021 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tfrrm\" (UniqueName: \"kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.264060 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.264101 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.264131 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.264348 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.265825 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.266030 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.266047 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") device mount path \"/mnt/openstack/pv12\"" pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.266807 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.291509 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.305086 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tfrrm\" (UniqueName: \"kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.310967 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs\") pod \"openstack-cell1-galera-0\" (UID: 
\"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.336950 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"openstack-cell1-galera-0\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.355114 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.366497 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kwwmw\" (UniqueName: \"kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.368133 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.368236 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.368278 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.368300 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.470724 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kwwmw\" (UniqueName: \"kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.470810 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.470862 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle\") pod \"memcached-0\" (UID: 
\"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.470897 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.470918 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.478594 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.481588 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.498445 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.499765 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.506784 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kwwmw\" (UniqueName: \"kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw\") pod \"memcached-0\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " pod="openstack/memcached-0" Jan 23 07:13:23 crc kubenswrapper[5102]: I0123 07:13:23.552520 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/memcached-0" Jan 23 07:13:24 crc kubenswrapper[5102]: I0123 07:13:24.029252 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 07:13:24 crc kubenswrapper[5102]: I0123 07:13:24.273254 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerStarted","Data":"214cb08a2f4840bce6138becd379b9e3e1c2130ae49f9a8af25bd667c4e1d9eb"} Jan 23 07:13:24 crc kubenswrapper[5102]: I0123 07:13:24.366863 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/memcached-0"] Jan 23 07:13:24 crc kubenswrapper[5102]: W0123 07:13:24.419699 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd0498339_2dc7_4527_8396_50bbd00b8443.slice/crio-202d67de3f27e495bfa00ce900d66d430218f5ccba72ca8ee84ed2602b04b34e WatchSource:0}: Error finding container 202d67de3f27e495bfa00ce900d66d430218f5ccba72ca8ee84ed2602b04b34e: Status 404 returned error can't find the container with id 202d67de3f27e495bfa00ce900d66d430218f5ccba72ca8ee84ed2602b04b34e Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.184603 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.185935 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.196825 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.201658 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"telemetry-ceilometer-dockercfg-kcffj" Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.309729 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d0498339-2dc7-4527-8396-50bbd00b8443","Type":"ContainerStarted","Data":"202d67de3f27e495bfa00ce900d66d430218f5ccba72ca8ee84ed2602b04b34e"} Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.345974 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jsp8g\" (UniqueName: \"kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g\") pod \"kube-state-metrics-0\" (UID: \"9d9ac8cd-f88c-4828-a66b-19689f94104e\") " pod="openstack/kube-state-metrics-0" Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.446686 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jsp8g\" (UniqueName: \"kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g\") pod \"kube-state-metrics-0\" (UID: \"9d9ac8cd-f88c-4828-a66b-19689f94104e\") " pod="openstack/kube-state-metrics-0" Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.480418 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jsp8g\" (UniqueName: \"kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g\") pod \"kube-state-metrics-0\" (UID: \"9d9ac8cd-f88c-4828-a66b-19689f94104e\") " pod="openstack/kube-state-metrics-0" Jan 23 07:13:25 crc kubenswrapper[5102]: I0123 07:13:25.530207 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:13:26 crc kubenswrapper[5102]: I0123 07:13:26.526925 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:13:26 crc kubenswrapper[5102]: W0123 07:13:26.551951 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9d9ac8cd_f88c_4828_a66b_19689f94104e.slice/crio-54f42f218385ae6b09490b2941b329da240aaea572513d1cb51a3706b7ab70ad WatchSource:0}: Error finding container 54f42f218385ae6b09490b2941b329da240aaea572513d1cb51a3706b7ab70ad: Status 404 returned error can't find the container with id 54f42f218385ae6b09490b2941b329da240aaea572513d1cb51a3706b7ab70ad Jan 23 07:13:27 crc kubenswrapper[5102]: I0123 07:13:27.362495 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9ac8cd-f88c-4828-a66b-19689f94104e","Type":"ContainerStarted","Data":"54f42f218385ae6b09490b2941b329da240aaea572513d1cb51a3706b7ab70ad"} Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.256044 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.258150 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.266052 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-config" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.266052 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-nb-dockercfg-xgw7k" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.266268 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-nb-ovndbs" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.266366 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-nb-scripts" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.266509 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovn-metrics" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.278626 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438035 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438169 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438214 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc 
kubenswrapper[5102]: I0123 07:13:29.438248 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438299 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438324 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438354 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.438392 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wm4jz\" (UniqueName: \"kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.541972 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542057 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542117 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542154 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542179 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542221 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542274 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wm4jz\" (UniqueName: \"kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.542336 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.688773 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") device mount path \"/mnt/openstack/pv04\"" pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.908654 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.908970 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.914971 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wm4jz\" (UniqueName: \"kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.917823 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.918030 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.919012 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.919163 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:29 crc kubenswrapper[5102]: I0123 07:13:29.940429 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"ovsdbserver-nb-0\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.195390 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.443378 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.444399 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.450773 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovncontroller-ovndbs" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.450978 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-scripts" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.451576 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncontroller-ovncontroller-dockercfg-bntjf" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.461883 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.463732 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.466993 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.486637 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.549854 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550319 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550363 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550389 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cqqsd\" (UniqueName: \"kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550416 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550433 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550481 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-btb7h\" (UniqueName: \"kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550506 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: 
I0123 07:13:30.550531 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550584 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550638 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550662 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.550703 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652484 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-btb7h\" (UniqueName: \"kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652557 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652577 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652608 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652625 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for 
volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652673 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652711 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652736 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652775 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652795 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqqsd\" (UniqueName: \"kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652814 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.652830 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.653432 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " 
pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.653666 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.653796 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.653822 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.654123 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.654941 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.656305 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.656980 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.657243 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.659967 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.663425 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle\") pod 
\"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.670142 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-btb7h\" (UniqueName: \"kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h\") pod \"ovn-controller-ovs-h9gtx\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") " pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.672743 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqqsd\" (UniqueName: \"kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd\") pod \"ovn-controller-rkvv7\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") " pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.773212 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:30 crc kubenswrapper[5102]: I0123 07:13:30.789051 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.252413 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.257839 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:13:31 crc kubenswrapper[5102]: W0123 07:13:31.270366 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4c43e79a_0827_4f25_a2b4_9b53ec46f96f.slice/crio-b2882d618cb10293f64f9242e20d2b2d712b4391c1c3a826bf040e1ada6c3f21 WatchSource:0}: Error finding container b2882d618cb10293f64f9242e20d2b2d712b4391c1c3a826bf040e1ada6c3f21: Status 404 returned error can't find the container with id b2882d618cb10293f64f9242e20d2b2d712b4391c1c3a826bf040e1ada6c3f21 Jan 23 07:13:31 crc kubenswrapper[5102]: W0123 07:13:31.273049 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb784258_3999_4323_8ef6_06631e94e61f.slice/crio-8d0cdec117560c25624d9a9ccb470c3f38fe338c2b82e706e69159a8717a8b67 WatchSource:0}: Error finding container 8d0cdec117560c25624d9a9ccb470c3f38fe338c2b82e706e69159a8717a8b67: Status 404 returned error can't find the container with id 8d0cdec117560c25624d9a9ccb470c3f38fe338c2b82e706e69159a8717a8b67 Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.500719 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerStarted","Data":"b2882d618cb10293f64f9242e20d2b2d712b4391c1c3a826bf040e1ada6c3f21"} Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.511981 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7" event={"ID":"fb784258-3999-4323-8ef6-06631e94e61f","Type":"ContainerStarted","Data":"8d0cdec117560c25624d9a9ccb470c3f38fe338c2b82e706e69159a8717a8b67"} Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.631974 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.870048 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] 
Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.873407 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.875956 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-metrics-config" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.891283 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.974498 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.974614 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.974653 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w29tf\" (UniqueName: \"kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.974828 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-combined-ca-bundle\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.975046 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:31 crc kubenswrapper[5102]: I0123 07:13:31.975102 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.022745 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"] Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.054118 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"] Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.055719 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.061040 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-nb" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.069812 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"] Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078000 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078314 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078383 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w29tf\" (UniqueName: \"kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078453 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-combined-ca-bundle\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078831 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.078874 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.080626 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.080702 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.080899 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.096559 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.096984 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-combined-ca-bundle\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.103384 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w29tf\" (UniqueName: \"kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf\") pod \"ovn-controller-metrics-hqzgg\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") " pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.180923 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.180982 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.181007 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rhbfp\" (UniqueName: \"kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.181049 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.212409 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-metrics-hqzgg" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.282481 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.282888 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.282971 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rhbfp\" (UniqueName: \"kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.283106 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.283767 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.284085 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.284477 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.314104 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rhbfp\" (UniqueName: \"kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp\") pod \"dnsmasq-dns-7878659675-jvvft\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") " pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:32 crc kubenswrapper[5102]: I0123 07:13:32.386317 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.360314 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.362669 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.365902 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovndbcluster-sb-ovndbs" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.366186 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-config" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.366234 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovncluster-ovndbcluster-sb-dockercfg-b2tnq" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.366498 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovndbcluster-sb-scripts" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.422670 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515459 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515558 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k6n2h\" (UniqueName: \"kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515592 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515613 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515643 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515724 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " 
pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515826 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.515870 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.617728 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.617837 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k6n2h\" (UniqueName: \"kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.617888 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.617926 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.617967 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.618017 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.618069 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.618118 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: 
\"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.619360 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") device mount path \"/mnt/openstack/pv02\"" pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.619791 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.621772 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.627009 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.628057 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.631428 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.635965 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.641521 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k6n2h\" (UniqueName: \"kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.648299 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"ovsdbserver-sb-0\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") " pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:33 crc kubenswrapper[5102]: I0123 07:13:33.715375 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 23 07:13:40 crc kubenswrapper[5102]: I0123 07:13:40.593787 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerStarted","Data":"dee3cf005ffe767982b2518d6f239519e3e383a59fbddc7ef88bfb968bb3b207"} Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.518323 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.519060 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-4gbz9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod 
rabbitmq-cell1-server-0_openstack(1ea732e7-d11d-4e12-9d44-f8fcafa50de5): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.520217 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-cell1-server-0" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.541462 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.541880 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:setup-container,Image:quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d,Command:[sh -c cp /tmp/erlang-cookie-secret/.erlang.cookie /var/lib/rabbitmq/.erlang.cookie && chmod 600 /var/lib/rabbitmq/.erlang.cookie ; cp /tmp/rabbitmq-plugins/enabled_plugins /operator/enabled_plugins ; echo '[default]' > /var/lib/rabbitmq/.rabbitmqadmin.conf && sed -e 's/default_user/username/' -e 's/default_pass/password/' /tmp/default_user.conf >> /var/lib/rabbitmq/.rabbitmqadmin.conf && chmod 600 /var/lib/rabbitmq/.rabbitmqadmin.conf ; sleep 30],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} BinarySI},},Requests:ResourceList{cpu: {{20 -3} {} 20m DecimalSI},memory: {{67108864 0} {} 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:plugins-conf,ReadOnly:false,MountPath:/tmp/rabbitmq-plugins/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-erlang-cookie,ReadOnly:false,MountPath:/var/lib/rabbitmq/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:erlang-cookie-secret,ReadOnly:false,MountPath:/tmp/erlang-cookie-secret/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-plugins,ReadOnly:false,MountPath:/operator,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:persistence,ReadOnly:false,MountPath:/var/lib/rabbitmq/mnesia/,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:rabbitmq-confd,ReadOnly:false,MountPath:/tmp/default_user.conf,SubPath:default_user.conf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-qkxwl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod rabbitmq-server-0_openstack(f4fc3e1d-5fac-4696-a8eb-709db37b5ff6): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.543117 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/rabbitmq-server-0" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.626182 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d\\\"\"" pod="openstack/rabbitmq-cell1-server-0" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" Jan 23 07:13:43 crc kubenswrapper[5102]: E0123 07:13:43.628036 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"setup-container\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-rabbitmq@sha256:e733252aab7f4bc0efbdd712bcd88e44c5498bf1773dba843bc9dcfac324fe3d\\\"\"" pod="openstack/rabbitmq-server-0" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" Jan 23 07:13:45 crc kubenswrapper[5102]: E0123 07:13:45.464065 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13" Jan 23 07:13:45 crc 
kubenswrapper[5102]: E0123 07:13:45.464684 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kxxtl,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-galera-0_openstack(1ede537b-39d8-483c-9a2d-4ace36319060): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:45 crc kubenswrapper[5102]: E0123 07:13:45.465838 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-galera-0" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" Jan 23 07:13:45 crc kubenswrapper[5102]: E0123 07:13:45.640894 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="openstack/openstack-galera-0" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" Jan 23 07:13:46 crc kubenswrapper[5102]: E0123 07:13:46.291787 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc" Jan 23 07:13:46 crc kubenswrapper[5102]: 
E0123 07:13:46.292051 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:memcached,Image:quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc,Command:[/usr/bin/dumb-init -- /usr/local/bin/kolla_start],Args:[],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:memcached,HostPort:0,ContainerPort:11211,Protocol:TCP,HostIP:,},ContainerPort{Name:memcached-tls,HostPort:0,ContainerPort:11212,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},EnvVar{Name:POD_IPS,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIPs,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},EnvVar{Name:CONFIG_HASH,Value:n54fh567h549h655h558h695h8ch5bh649h56dh66fh79h68bh5fch677h65bh588h9ch559h597h65h69hb9h5bbh67ch544h675hf7h58chd8h695h5d7q,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/src,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/certs/memcached.crt,SubPath:tls.crt,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:memcached-tls-certs,ReadOnly:true,MountPath:/var/lib/config-data/tls/private/memcached.key,SubPath:tls.key,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-kwwmw,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:3,TimeoutSeconds:5,PeriodSeconds:3,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},ReadinessProbe:&Probe{ProbeHandler:ProbeHandler{Exec:nil,HTTPGet:nil,TCPSocket:&TCPSocketAction{Port:{0 11211 },Host:,},GRPC:nil,},InitialDelaySeconds:5,TimeoutSeconds:5,PeriodSeconds:5,SuccessThreshold:1,FailureThreshold:3,TerminationGracePeriodSeconds:nil,},Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42457,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42457,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod memcached-0_openstack(d0498339-2dc7-4527-8396-50bbd00b8443): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:46 crc kubenswrapper[5102]: E0123 07:13:46.293283 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to 
\"StartContainer\" for \"memcached\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/memcached-0" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" Jan 23 07:13:46 crc kubenswrapper[5102]: E0123 07:13:46.656513 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"memcached\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-memcached@sha256:e47191ba776414b781b3e27b856ab45a03b9480c7dc2b1addb939608794882dc\\\"\"" pod="openstack/memcached-0" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" Jan 23 07:13:51 crc kubenswrapper[5102]: E0123 07:13:51.197633 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13" Jan 23 07:13:51 crc kubenswrapper[5102]: E0123 07:13:51.199202 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:mysql-bootstrap,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[bash /var/lib/operator-scripts/mysql_bootstrap.sh],Args:[],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:True,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:mysql-db,ReadOnly:false,MountPath:/var/lib/mysql,SubPath:mysql,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-default,ReadOnly:true,MountPath:/var/lib/config-data/default,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data-generated,ReadOnly:false,MountPath:/var/lib/config-data/generated,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:operator-scripts,ReadOnly:true,MountPath:/var/lib/operator-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kolla-config,ReadOnly:true,MountPath:/var/lib/kolla/config_files,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-tfrrm,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod openstack-cell1-galera-0_openstack(0e1fd671-9192-4406-b7ea-3a33b4cdec57): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:51 crc kubenswrapper[5102]: E0123 07:13:51.200392 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" 
with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/openstack-cell1-galera-0" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" Jan 23 07:13:51 crc kubenswrapper[5102]: E0123 07:13:51.713238 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mysql-bootstrap\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13\\\"\"" pod="openstack/openstack-cell1-galera-0" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.258675 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.260034 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n659h4h664hbh658h587h67ch89h587h8fh679hc6hf9h55fh644h5d5h698h68dh5cdh5ffh669h54ch9h689hb8hd4h5bfhd8h5d7h5fh665h574q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-xcl2k,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-95f5f6995-scxcj_openstack(6e143893-c456-4bec-8400-d09686165f84): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.263456 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed 
to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" podUID="6e143893-c456-4bec-8400-d09686165f84" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.274658 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.274911 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:n68chd6h679hbfh55fhc6h5ffh5d8h94h56ch589hb4hc5h57bh677hcdh655h8dh667h675h654h66ch567h8fh659h5b4h675h566h55bh54h67dh6dq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-txx4q,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-744ffd65bc-lfcqw_openstack(38a915fe-37e0-4a5c-9343-9944308b2172): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.276156 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw" podUID="38a915fe-37e0-4a5c-9343-9944308b2172" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.310705 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" 
image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.311333 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:ndfhb5h667h568h584h5f9h58dh565h664h587h597h577h64bh5c4h66fh647hbdh68ch5c5h68dh686h5f7h64hd7hc6h55fh57bh98h57fh87h5fh57fq,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:dns-svc,ReadOnly:true,MountPath:/etc/dnsmasq.d/hosts/dns-svc,SubPath:dns-svc,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-ngw5h,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-5f854695bc-xxrjx_openstack(b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.312841 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx" podUID="b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.327306 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.327499 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:init,Image:quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33,Command:[/bin/bash],Args:[-c dnsmasq --interface=* --conf-dir=/etc/dnsmasq.d --hostsdir=/etc/dnsmasq.d/hosts --keep-in-foreground --log-debug --bind-interfaces --listen-address=$(POD_IP) --port 5353 --log-facility=- --no-hosts --domain-needed --no-resolv --bogus-priv --log-queries --test],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:CONFIG_HASH,Value:nffh5bdhf4h5f8h79h55h77h58fh56dh7bh6fh578hbch55dh68h56bhd9h65dh57ch658hc9h566h666h688h58h65dh684h5d7h6ch575h5d6h88q,ValueFrom:nil,},EnvVar{Name:POD_IP,Value:,ValueFrom:&EnvVarSource{FieldRef:&ObjectFieldSelector{APIVersion:v1,FieldPath:status.podIP,},ResourceFieldRef:nil,ConfigMapKeyRef:nil,SecretKeyRef:nil,},},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:config,ReadOnly:true,MountPath:/etc/dnsmasq.d/config.cfg,SubPath:dns,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-v954c,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000650000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod dnsmasq-dns-84bb9d8bd9-hfzdr_openstack(bc5fa4f3-0e35-4351-be5e-0c662e042eb4): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.329083 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" podUID="bc5fa4f3-0e35-4351-be5e-0c662e042eb4" Jan 23 07:13:53 crc kubenswrapper[5102]: E0123 07:13:53.726868 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"init\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-neutron-server@sha256:ea0bf67f1aa5d95a9a07b9c8692c293470f1311792c55d3d57f1f92e56689c33\\\"\"" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" podUID="6e143893-c456-4bec-8400-d09686165f84" Jan 23 07:13:53 crc kubenswrapper[5102]: I0123 07:13:53.741634 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.264612 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.424696 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngw5h\" (UniqueName: \"kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h\") pod \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.424873 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config\") pod \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.424990 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc\") pod \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\" (UID: \"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.425581 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809" (UID: "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.426078 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config" (OuterVolumeSpecName: "config") pod "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809" (UID: "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.432913 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h" (OuterVolumeSpecName: "kube-api-access-ngw5h") pod "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809" (UID: "b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809"). InnerVolumeSpecName "kube-api-access-ngw5h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.529773 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.529897 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngw5h\" (UniqueName: \"kubernetes.io/projected/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-kube-api-access-ngw5h\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.529921 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.531891 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.578470 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] Jan 23 07:13:54 crc kubenswrapper[5102]: W0123 07:13:54.604723 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddf64b95b_fb03_49b3_b9e2_7d064e39c71b.slice/crio-f3550d5d54284bec1edb547d932f3d06b05a9be1ac8ac1d1a7b18b179a553b07 WatchSource:0}: Error finding container f3550d5d54284bec1edb547d932f3d06b05a9be1ac8ac1d1a7b18b179a553b07: Status 404 returned error can't find the container with id f3550d5d54284bec1edb547d932f3d06b05a9be1ac8ac1d1a7b18b179a553b07 Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.604919 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.631172 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config\") pod \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.631469 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v954c\" (UniqueName: \"kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c\") pod \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\" (UID: \"bc5fa4f3-0e35-4351-be5e-0c662e042eb4\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.632069 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config" (OuterVolumeSpecName: "config") pod "bc5fa4f3-0e35-4351-be5e-0c662e042eb4" (UID: "bc5fa4f3-0e35-4351-be5e-0c662e042eb4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.640419 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c" (OuterVolumeSpecName: "kube-api-access-v954c") pod "bc5fa4f3-0e35-4351-be5e-0c662e042eb4" (UID: "bc5fa4f3-0e35-4351-be5e-0c662e042eb4"). InnerVolumeSpecName "kube-api-access-v954c". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.658133 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.732901 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config\") pod \"38a915fe-37e0-4a5c-9343-9944308b2172\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733063 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc\") pod \"38a915fe-37e0-4a5c-9343-9944308b2172\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733128 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-txx4q\" (UniqueName: \"kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q\") pod \"38a915fe-37e0-4a5c-9343-9944308b2172\" (UID: \"38a915fe-37e0-4a5c-9343-9944308b2172\") " Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733530 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733567 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v954c\" (UniqueName: \"kubernetes.io/projected/bc5fa4f3-0e35-4351-be5e-0c662e042eb4-kube-api-access-v954c\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733589 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config" (OuterVolumeSpecName: "config") pod "38a915fe-37e0-4a5c-9343-9944308b2172" (UID: "38a915fe-37e0-4a5c-9343-9944308b2172"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.733749 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "38a915fe-37e0-4a5c-9343-9944308b2172" (UID: "38a915fe-37e0-4a5c-9343-9944308b2172"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.735038 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hqzgg" event={"ID":"df64b95b-fb03-49b3-b9e2-7d064e39c71b","Type":"ContainerStarted","Data":"f3550d5d54284bec1edb547d932f3d06b05a9be1ac8ac1d1a7b18b179a553b07"} Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.736371 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-jvvft" event={"ID":"f0e93134-2a54-4f11-8afa-1924a7f53c3e","Type":"ContainerStarted","Data":"ef44c4900bdd8daf717d77d70b3e1650cabe059c2183961699849076b903d194"} Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.737768 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q" (OuterVolumeSpecName: "kube-api-access-txx4q") pod "38a915fe-37e0-4a5c-9343-9944308b2172" (UID: "38a915fe-37e0-4a5c-9343-9944308b2172"). InnerVolumeSpecName "kube-api-access-txx4q". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.738053 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.738820 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-84bb9d8bd9-hfzdr" event={"ID":"bc5fa4f3-0e35-4351-be5e-0c662e042eb4","Type":"ContainerDied","Data":"3f3f8a89901defda8287ec8490fd7e6135a67e96bac9214314fdfd65e0422766"} Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.740160 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx" event={"ID":"b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809","Type":"ContainerDied","Data":"d2c97277e57fd051b396150d90236183180ad2a6a5cbdc567c5f5f22fb88793e"} Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.740188 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5f854695bc-xxrjx" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.743286 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw" event={"ID":"38a915fe-37e0-4a5c-9343-9944308b2172","Type":"ContainerDied","Data":"276f6f3c32e8e4901ebe18de2cde66caf57833b4ae9f388f1a19b2188b275a79"} Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.743345 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-744ffd65bc-lfcqw" Jan 23 07:13:54 crc kubenswrapper[5102]: W0123 07:13:54.773957 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2f7956cc_1c1c_410f_94f8_86feb62d9124.slice/crio-8dc48cca7d31f7dccd997c7fd879f5ba7f70557f7c5a18f7fe00dbef48252883 WatchSource:0}: Error finding container 8dc48cca7d31f7dccd997c7fd879f5ba7f70557f7c5a18f7fe00dbef48252883: Status 404 returned error can't find the container with id 8dc48cca7d31f7dccd997c7fd879f5ba7f70557f7c5a18f7fe00dbef48252883 Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.820491 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.842506 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.842568 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-txx4q\" (UniqueName: \"kubernetes.io/projected/38a915fe-37e0-4a5c-9343-9944308b2172-kube-api-access-txx4q\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.842582 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/38a915fe-37e0-4a5c-9343-9944308b2172-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.853135 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-84bb9d8bd9-hfzdr"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.875765 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.886199 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-744ffd65bc-lfcqw"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.901084 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"] Jan 23 07:13:54 crc kubenswrapper[5102]: I0123 07:13:54.907473 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5f854695bc-xxrjx"] Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.610555 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="38a915fe-37e0-4a5c-9343-9944308b2172" path="/var/lib/kubelet/pods/38a915fe-37e0-4a5c-9343-9944308b2172/volumes" Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.610992 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809" path="/var/lib/kubelet/pods/b32ce3a1-38f5-4a1c-b8c4-6d16a24b1809/volumes" Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.611365 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5fa4f3-0e35-4351-be5e-0c662e042eb4" path="/var/lib/kubelet/pods/bc5fa4f3-0e35-4351-be5e-0c662e042eb4/volumes" Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.761510 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerID="ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d" exitCode=0 Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.761709 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" 
event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerDied","Data":"ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d"} Jan 23 07:13:55 crc kubenswrapper[5102]: I0123 07:13:55.765492 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerStarted","Data":"8dc48cca7d31f7dccd997c7fd879f5ba7f70557f7c5a18f7fe00dbef48252883"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.777261 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerStarted","Data":"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.781285 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerStarted","Data":"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.783368 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9ac8cd-f88c-4828-a66b-19689f94104e","Type":"ContainerStarted","Data":"f284c0397c0ecb9cee83829e220db8548fa8227c3f98e3f6c35a60e6abe2bd5e"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.783488 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.787106 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7" event={"ID":"fb784258-3999-4323-8ef6-06631e94e61f","Type":"ContainerStarted","Data":"6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.787252 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-rkvv7" Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.789431 5102 generic.go:334] "Generic (PLEG): container finished" podID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerID="34c7bc908e3682ac89f82d217b46eb23f6c4c25058575eceb8b9837cffe1e1b5" exitCode=0 Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.789508 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-jvvft" event={"ID":"f0e93134-2a54-4f11-8afa-1924a7f53c3e","Type":"ContainerDied","Data":"34c7bc908e3682ac89f82d217b46eb23f6c4c25058575eceb8b9837cffe1e1b5"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.792626 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerStarted","Data":"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a"} Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 07:13:56.806144 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.234496908 podStartE2EDuration="31.806115381s" podCreationTimestamp="2026-01-23 07:13:25 +0000 UTC" firstStartedPulling="2026-01-23 07:13:26.554531611 +0000 UTC m=+1157.374880576" lastFinishedPulling="2026-01-23 07:13:56.126150084 +0000 UTC m=+1186.946499049" observedRunningTime="2026-01-23 07:13:56.798844819 +0000 UTC m=+1187.619193794" watchObservedRunningTime="2026-01-23 07:13:56.806115381 +0000 UTC m=+1187.626464366" Jan 23 07:13:56 crc kubenswrapper[5102]: I0123 
07:13:56.888936 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-rkvv7" podStartSLOduration=4.024489169 podStartE2EDuration="26.888902243s" podCreationTimestamp="2026-01-23 07:13:30 +0000 UTC" firstStartedPulling="2026-01-23 07:13:31.277053479 +0000 UTC m=+1162.097402454" lastFinishedPulling="2026-01-23 07:13:54.141466553 +0000 UTC m=+1184.961815528" observedRunningTime="2026-01-23 07:13:56.878916419 +0000 UTC m=+1187.699265414" watchObservedRunningTime="2026-01-23 07:13:56.888902243 +0000 UTC m=+1187.709251238" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.826718 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hqzgg" event={"ID":"df64b95b-fb03-49b3-b9e2-7d064e39c71b","Type":"ContainerStarted","Data":"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.832921 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-jvvft" event={"ID":"f0e93134-2a54-4f11-8afa-1924a7f53c3e","Type":"ContainerStarted","Data":"d35b90bcca008e8d2b460918535833235df5123d3c26b5b483b8808592b715a2"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.836896 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerStarted","Data":"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.837150 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.837207 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.840056 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerStarted","Data":"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.843984 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerStarted","Data":"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.848975 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d0498339-2dc7-4527-8396-50bbd00b8443","Type":"ContainerStarted","Data":"88612d72f56f1267bf785aa1c9978b748c76a6d93eedc7a0d71e8ed0f2faec38"} Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.849312 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/memcached-0" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.870049 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-metrics-hqzgg" podStartSLOduration=24.555247139 podStartE2EDuration="27.870012135s" podCreationTimestamp="2026-01-23 07:13:31 +0000 UTC" firstStartedPulling="2026-01-23 07:13:54.608850484 +0000 UTC m=+1185.429199459" lastFinishedPulling="2026-01-23 07:13:57.92361548 +0000 UTC m=+1188.743964455" observedRunningTime="2026-01-23 07:13:58.860931578 +0000 UTC m=+1189.681280583" watchObservedRunningTime="2026-01-23 07:13:58.870012135 +0000 UTC m=+1189.690361130" Jan 
23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.898828 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-controller-ovs-h9gtx" podStartSLOduration=14.914745347 podStartE2EDuration="28.898802952s" podCreationTimestamp="2026-01-23 07:13:30 +0000 UTC" firstStartedPulling="2026-01-23 07:13:39.892671852 +0000 UTC m=+1170.713020827" lastFinishedPulling="2026-01-23 07:13:53.876729457 +0000 UTC m=+1184.697078432" observedRunningTime="2026-01-23 07:13:58.894564613 +0000 UTC m=+1189.714913628" watchObservedRunningTime="2026-01-23 07:13:58.898802952 +0000 UTC m=+1189.719151937" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.967089 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-sb-0" podStartSLOduration=23.814674464 podStartE2EDuration="26.967061832s" podCreationTimestamp="2026-01-23 07:13:32 +0000 UTC" firstStartedPulling="2026-01-23 07:13:54.77841315 +0000 UTC m=+1185.598762125" lastFinishedPulling="2026-01-23 07:13:57.930800518 +0000 UTC m=+1188.751149493" observedRunningTime="2026-01-23 07:13:58.929766925 +0000 UTC m=+1189.750115930" watchObservedRunningTime="2026-01-23 07:13:58.967061832 +0000 UTC m=+1189.787410807" Jan 23 07:13:58 crc kubenswrapper[5102]: I0123 07:13:58.970407 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-7878659675-jvvft" podStartSLOduration=25.171263655 podStartE2EDuration="26.970390993s" podCreationTimestamp="2026-01-23 07:13:32 +0000 UTC" firstStartedPulling="2026-01-23 07:13:53.871016502 +0000 UTC m=+1184.691365477" lastFinishedPulling="2026-01-23 07:13:55.67014384 +0000 UTC m=+1186.490492815" observedRunningTime="2026-01-23 07:13:58.960265804 +0000 UTC m=+1189.780614819" watchObservedRunningTime="2026-01-23 07:13:58.970390993 +0000 UTC m=+1189.790739978" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.002033 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovsdbserver-nb-0" podStartSLOduration=4.14713093 podStartE2EDuration="31.002003027s" podCreationTimestamp="2026-01-23 07:13:28 +0000 UTC" firstStartedPulling="2026-01-23 07:13:31.273837591 +0000 UTC m=+1162.094186566" lastFinishedPulling="2026-01-23 07:13:58.128709678 +0000 UTC m=+1188.949058663" observedRunningTime="2026-01-23 07:13:58.994219249 +0000 UTC m=+1189.814568234" watchObservedRunningTime="2026-01-23 07:13:59.002003027 +0000 UTC m=+1189.822352032" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.027245 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/memcached-0" podStartSLOduration=2.387884345 podStartE2EDuration="36.027225235s" podCreationTimestamp="2026-01-23 07:13:23 +0000 UTC" firstStartedPulling="2026-01-23 07:13:24.439616612 +0000 UTC m=+1155.259965587" lastFinishedPulling="2026-01-23 07:13:58.078957502 +0000 UTC m=+1188.899306477" observedRunningTime="2026-01-23 07:13:59.024875943 +0000 UTC m=+1189.845224918" watchObservedRunningTime="2026-01-23 07:13:59.027225235 +0000 UTC m=+1189.847574210" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.366634 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"] Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.421275 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"] Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.422901 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.430372 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovsdbserver-sb" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.440364 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"] Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.555741 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.556225 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.556254 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.556282 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjg4h\" (UniqueName: \"kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.556323 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.658255 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.658429 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.658458 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " 
pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.658488 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjg4h\" (UniqueName: \"kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.658561 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.660351 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.660613 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.661099 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.662840 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.692532 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjg4h\" (UniqueName: \"kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h\") pod \"dnsmasq-dns-586b989cdc-qbtrx\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.767972 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.856634 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" event={"ID":"6e143893-c456-4bec-8400-d09686165f84","Type":"ContainerDied","Data":"ba7f0ce7428b819bdcc84b2c674fb7d094c2b9d375f4f5ff42041957d7e9f8d3"} Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.856697 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-95f5f6995-scxcj" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.857670 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerStarted","Data":"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c"} Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.860020 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerStarted","Data":"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5"} Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.860617 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.861067 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config\") pod \"6e143893-c456-4bec-8400-d09686165f84\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.861129 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcl2k\" (UniqueName: \"kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k\") pod \"6e143893-c456-4bec-8400-d09686165f84\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.861165 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc\") pod \"6e143893-c456-4bec-8400-d09686165f84\" (UID: \"6e143893-c456-4bec-8400-d09686165f84\") " Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.862094 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "6e143893-c456-4bec-8400-d09686165f84" (UID: "6e143893-c456-4bec-8400-d09686165f84"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.862367 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config" (OuterVolumeSpecName: "config") pod "6e143893-c456-4bec-8400-d09686165f84" (UID: "6e143893-c456-4bec-8400-d09686165f84"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.867358 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k" (OuterVolumeSpecName: "kube-api-access-xcl2k") pod "6e143893-c456-4bec-8400-d09686165f84" (UID: "6e143893-c456-4bec-8400-d09686165f84"). InnerVolumeSpecName "kube-api-access-xcl2k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.915723 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.963413 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.963445 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcl2k\" (UniqueName: \"kubernetes.io/projected/6e143893-c456-4bec-8400-d09686165f84-kube-api-access-xcl2k\") on node \"crc\" DevicePath \"\"" Jan 23 07:13:59 crc kubenswrapper[5102]: I0123 07:13:59.963458 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/6e143893-c456-4bec-8400-d09686165f84-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.195625 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-nb-0" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.195796 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-nb-0" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.341756 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"] Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.349840 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-95f5f6995-scxcj"] Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.363366 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-nb-0" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.410122 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"] Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.716616 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/ovsdbserver-sb-0" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.758588 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/ovsdbserver-sb-0" Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.874903 5102 generic.go:334] "Generic (PLEG): container finished" podID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerID="f1b5f50db38825ac5b1b1bc777c4771251109d8a6fab61fc94c81cd5eb334884" exitCode=0 Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.874995 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" event={"ID":"7ea1bfb4-980f-4772-ad83-cfc4e09b773a","Type":"ContainerDied","Data":"f1b5f50db38825ac5b1b1bc777c4771251109d8a6fab61fc94c81cd5eb334884"} Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.875033 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" event={"ID":"7ea1bfb4-980f-4772-ad83-cfc4e09b773a","Type":"ContainerStarted","Data":"f419f25be0f805425267a3cca94f479faeb4be366c501fc971663a360adb22f6"} Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.878866 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerStarted","Data":"dac1e501d0f018ad7a331fa5911c1ac5b2f12ea3b755131154923e975ef2f708"} Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.880498 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovsdbserver-sb-0" 
Jan 23 07:14:00 crc kubenswrapper[5102]: I0123 07:14:00.940571 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-nb-0" Jan 23 07:14:01 crc kubenswrapper[5102]: I0123 07:14:01.616091 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e143893-c456-4bec-8400-d09686165f84" path="/var/lib/kubelet/pods/6e143893-c456-4bec-8400-d09686165f84/volumes" Jan 23 07:14:01 crc kubenswrapper[5102]: I0123 07:14:01.890244 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" event={"ID":"7ea1bfb4-980f-4772-ad83-cfc4e09b773a","Type":"ContainerStarted","Data":"31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2"} Jan 23 07:14:01 crc kubenswrapper[5102]: I0123 07:14:01.948780 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" podStartSLOduration=2.9487548500000003 podStartE2EDuration="2.94875485s" podCreationTimestamp="2026-01-23 07:13:59 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:01.946895903 +0000 UTC m=+1192.767244898" watchObservedRunningTime="2026-01-23 07:14:01.94875485 +0000 UTC m=+1192.769103825" Jan 23 07:14:01 crc kubenswrapper[5102]: I0123 07:14:01.977701 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovsdbserver-sb-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.157052 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.159211 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.160958 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-config" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.161195 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ovnnorthd-ovndbs" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.161622 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ovnnorthd-ovnnorthd-dockercfg-jlgxf" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.161980 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovnnorthd-scripts" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.175002 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267342 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267408 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267470 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267498 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267518 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267532 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.267571 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xpfw8\" (UniqueName: \"kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370079 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370193 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370226 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370248 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370263 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370288 5102 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xpfw8\" (UniqueName: \"kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.370342 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.371894 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.372219 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.372361 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.376271 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.377286 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.381729 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.391298 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xpfw8\" (UniqueName: \"kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8\") pod \"ovn-northd-0\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.488092 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 07:14:02 crc kubenswrapper[5102]: I0123 07:14:02.902176 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:14:03 crc kubenswrapper[5102]: I0123 07:14:03.079781 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:14:03 crc kubenswrapper[5102]: I0123 07:14:03.555026 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/memcached-0" Jan 23 07:14:03 crc kubenswrapper[5102]: I0123 07:14:03.910110 5102 generic.go:334] "Generic (PLEG): container finished" podID="1ede537b-39d8-483c-9a2d-4ace36319060" containerID="433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c" exitCode=0 Jan 23 07:14:03 crc kubenswrapper[5102]: I0123 07:14:03.910411 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerDied","Data":"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c"} Jan 23 07:14:03 crc kubenswrapper[5102]: I0123 07:14:03.912741 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerStarted","Data":"e564424f1c4d0233e925a77b797ea629df0c96a2b1b0915616fdf829a826b2cb"} Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.542964 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.571377 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"] Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.572475 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-7878659675-jvvft" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="dnsmasq-dns" containerID="cri-o://d35b90bcca008e8d2b460918535833235df5123d3c26b5b483b8808592b715a2" gracePeriod=10 Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.576392 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.652108 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"] Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.673056 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.703897 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"] Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.866681 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cgrjd\" (UniqueName: \"kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.867351 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.867428 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.867465 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.867724 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.960338 5102 generic.go:334] "Generic (PLEG): container finished" podID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerID="d35b90bcca008e8d2b460918535833235df5123d3c26b5b483b8808592b715a2" exitCode=0 Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.960492 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-jvvft" event={"ID":"f0e93134-2a54-4f11-8afa-1924a7f53c3e","Type":"ContainerDied","Data":"d35b90bcca008e8d2b460918535833235df5123d3c26b5b483b8808592b715a2"} Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.972180 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cgrjd\" (UniqueName: \"kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.972316 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") 
" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.972354 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.972392 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.972532 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.973717 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.974666 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.974904 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:05 crc kubenswrapper[5102]: I0123 07:14:05.975802 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.005796 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cgrjd\" (UniqueName: \"kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd\") pod \"dnsmasq-dns-67fdf7998c-9fgnj\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") " pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.112285 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jvvft" Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.161184 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.182500 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rhbfp\" (UniqueName: \"kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp\") pod \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") "
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.182580 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc\") pod \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") "
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.182774 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb\") pod \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") "
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.182803 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config\") pod \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\" (UID: \"f0e93134-2a54-4f11-8afa-1924a7f53c3e\") "
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.207068 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp" (OuterVolumeSpecName: "kube-api-access-rhbfp") pod "f0e93134-2a54-4f11-8afa-1924a7f53c3e" (UID: "f0e93134-2a54-4f11-8afa-1924a7f53c3e"). InnerVolumeSpecName "kube-api-access-rhbfp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.284823 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rhbfp\" (UniqueName: \"kubernetes.io/projected/f0e93134-2a54-4f11-8afa-1924a7f53c3e-kube-api-access-rhbfp\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.293921 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "f0e93134-2a54-4f11-8afa-1924a7f53c3e" (UID: "f0e93134-2a54-4f11-8afa-1924a7f53c3e"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.310132 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "f0e93134-2a54-4f11-8afa-1924a7f53c3e" (UID: "f0e93134-2a54-4f11-8afa-1924a7f53c3e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.315189 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config" (OuterVolumeSpecName: "config") pod "f0e93134-2a54-4f11-8afa-1924a7f53c3e" (UID: "f0e93134-2a54-4f11-8afa-1924a7f53c3e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.387434 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.387470 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.387482 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/f0e93134-2a54-4f11-8afa-1924a7f53c3e-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.741734 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"]
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.815830 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-storage-0"]
Jan 23 07:14:06 crc kubenswrapper[5102]: E0123 07:14:06.816656 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="init"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.816741 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="init"
Jan 23 07:14:06 crc kubenswrapper[5102]: E0123 07:14:06.816808 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="dnsmasq-dns"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.816861 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="dnsmasq-dns"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.817148 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" containerName="dnsmasq-dns"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.830656 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.834752 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-2mrb5"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.834885 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-storage-config-data"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.835133 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-conf"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.835244 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-files"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.855633 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"]
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.978111 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" event={"ID":"0f92ba08-c435-49a6-96cc-dd18ef33f14a","Type":"ContainerStarted","Data":"f3ecf2b23cf99280baff1a8af2c780709fdcfe3a185d73f2fb3f0b09fb1c4ab7"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.980682 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerStarted","Data":"c460a5da6c92f225aaabae26dde8398c051c62f311c254bc8f7ac9b476cfabcf"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.989685 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerStarted","Data":"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.992184 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-7878659675-jvvft" event={"ID":"f0e93134-2a54-4f11-8afa-1924a7f53c3e","Type":"ContainerDied","Data":"ef44c4900bdd8daf717d77d70b3e1650cabe059c2183961699849076b903d194"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.992236 5102 scope.go:117] "RemoveContainer" containerID="d35b90bcca008e8d2b460918535833235df5123d3c26b5b483b8808592b715a2"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.992407 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-7878659675-jvvft"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.995838 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerStarted","Data":"fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.995903 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerStarted","Data":"28fd2f580f926860b97dac693969e9dcc8ef486d9834e279e718727164266b75"}
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.997045 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ovn-northd-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.998792 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.998892 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5mfgc\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.998964 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.999043 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.999098 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:06 crc kubenswrapper[5102]: I0123 07:14:06.999148 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.022823 5102 scope.go:117] "RemoveContainer" containerID="34c7bc908e3682ac89f82d217b46eb23f6c4c25058575eceb8b9837cffe1e1b5"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.059717 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ovn-northd-0" podStartSLOduration=2.5315879949999998 podStartE2EDuration="5.059691703s" podCreationTimestamp="2026-01-23 07:14:02 +0000 UTC" firstStartedPulling="2026-01-23 07:14:03.084423752 +0000 UTC m=+1193.904772727" lastFinishedPulling="2026-01-23 07:14:05.61252746 +0000 UTC m=+1196.432876435" observedRunningTime="2026-01-23 07:14:07.0376194 +0000 UTC m=+1197.857968405" watchObservedRunningTime="2026-01-23 07:14:07.059691703 +0000 UTC m=+1197.880040688"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.072561 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"]
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.078508 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-7878659675-jvvft"]
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.083437 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-galera-0" podStartSLOduration=10.68501736 podStartE2EDuration="47.083419086s" podCreationTimestamp="2026-01-23 07:13:20 +0000 UTC" firstStartedPulling="2026-01-23 07:13:22.650112208 +0000 UTC m=+1153.470461173" lastFinishedPulling="2026-01-23 07:13:59.048513924 +0000 UTC m=+1189.868862899" observedRunningTime="2026-01-23 07:14:07.079886098 +0000 UTC m=+1197.900235083" watchObservedRunningTime="2026-01-23 07:14:07.083419086 +0000 UTC m=+1197.903768061"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.101786 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.102003 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5mfgc\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.102921 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.103213 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.103258 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") device mount path \"/mnt/openstack/pv05\"" pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.104048 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.104080 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.104129 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:07.604108726 +0000 UTC m=+1198.424457711 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.104299 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.104466 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.105435 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.105625 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.108817 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.126920 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5mfgc\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.128059 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.612671 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f0e93134-2a54-4f11-8afa-1924a7f53c3e" path="/var/lib/kubelet/pods/f0e93134-2a54-4f11-8afa-1924a7f53c3e/volumes"
Jan 23 07:14:07 crc kubenswrapper[5102]: I0123 07:14:07.614872 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
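Every etc-swift failure above and below traces back to one missing object: the projected volume references configmap "swift-ring-files", which only exists once the swift-ring-rebalance job publishes it. A hedged client-go sketch of the same lookup (illustrative only; the kubelet resolves the projected source internally):

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	cs, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// The object the etc-swift projected volume is waiting for.
	_, err = cs.CoreV1().ConfigMaps("openstack").Get(context.TODO(), "swift-ring-files", metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		fmt.Println(`configmap "swift-ring-files" not found; swift-storage-0 stays unmountable until the rebalance job creates it`)
	}
}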
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.615165 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.615192 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 23 07:14:07 crc kubenswrapper[5102]: E0123 07:14:07.615255 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:08.615233979 +0000 UTC m=+1199.435582964 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
Jan 23 07:14:08 crc kubenswrapper[5102]: I0123 07:14:08.012380 5102 generic.go:334] "Generic (PLEG): container finished" podID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerID="050b1a1ec1e060b981c1246b97bab3a71cb8dd765325a2b812e485720f3c1d94" exitCode=0
Jan 23 07:14:08 crc kubenswrapper[5102]: I0123 07:14:08.013807 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" event={"ID":"0f92ba08-c435-49a6-96cc-dd18ef33f14a","Type":"ContainerDied","Data":"050b1a1ec1e060b981c1246b97bab3a71cb8dd765325a2b812e485720f3c1d94"}
Jan 23 07:14:08 crc kubenswrapper[5102]: I0123 07:14:08.633410 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:08 crc kubenswrapper[5102]: E0123 07:14:08.633785 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 23 07:14:08 crc kubenswrapper[5102]: E0123 07:14:08.634026 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 23 07:14:08 crc kubenswrapper[5102]: E0123 07:14:08.634141 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:10.634097722 +0000 UTC m=+1201.454446737 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
Jan 23 07:14:09 crc kubenswrapper[5102]: I0123 07:14:09.026080 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" event={"ID":"0f92ba08-c435-49a6-96cc-dd18ef33f14a","Type":"ContainerStarted","Data":"1ceec8956499865fff96f6ae64bb6e4f03781e2ae5d8da6021390287677e14c9"}
Jan 23 07:14:09 crc kubenswrapper[5102]: I0123 07:14:09.058511 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" podStartSLOduration=4.058472252 podStartE2EDuration="4.058472252s" podCreationTimestamp="2026-01-23 07:14:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:09.056696298 +0000 UTC m=+1199.877045283" watchObservedRunningTime="2026-01-23 07:14:09.058472252 +0000 UTC m=+1199.878821267"
Jan 23 07:14:09 crc kubenswrapper[5102]: I0123 07:14:09.917733 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.032793 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.668392 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-ring-rebalance-7ctb2"]
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.669419 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.669552 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:10 crc kubenswrapper[5102]: E0123 07:14:10.669770 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 23 07:14:10 crc kubenswrapper[5102]: E0123 07:14:10.669792 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 23 07:14:10 crc kubenswrapper[5102]: E0123 07:14:10.669842 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:14.669827108 +0000 UTC m=+1205.490176083 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
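Note the durationBeforeRetry progression across these etc-swift records: 500ms, 1s, 2s, 4s, and 8s at 07:14:14 below. The volume reconciler doubles the per-operation delay after each consecutive failure; a minimal sketch of that schedule (illustrative only, ignoring the cap the real backoff helper applies):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Doubling retry delay, as seen in the nestedpendingoperations records.
	delay := 500 * time.Millisecond
	for i := 0; i < 5; i++ {
		fmt.Println(delay) // 500ms, 1s, 2s, 4s, 8s
		delay *= 2
	}
}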
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.678712 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-config-data"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.679020 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.680223 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"swift-ring-scripts"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.705278 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7ctb2"]
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.770351 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.770776 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.770927 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.771081 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cw42r\" (UniqueName: \"kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.771206 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.771322 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.771431 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.872909 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.872968 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.873052 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.873084 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.873117 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.873147 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cw42r\" (UniqueName: \"kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.873189 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.874102 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.874969 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.875406 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.882712 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.882849 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.900916 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.913116 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cw42r\" (UniqueName: \"kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r\") pod \"swift-ring-rebalance-7ctb2\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") " pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.989757 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-swift-dockercfg-2mrb5"
Jan 23 07:14:10 crc kubenswrapper[5102]: I0123 07:14:10.998625 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:11 crc kubenswrapper[5102]: I0123 07:14:11.046750 5102 generic.go:334] "Generic (PLEG): container finished" podID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerID="c460a5da6c92f225aaabae26dde8398c051c62f311c254bc8f7ac9b476cfabcf" exitCode=0
Jan 23 07:14:11 crc kubenswrapper[5102]: I0123 07:14:11.046872 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerDied","Data":"c460a5da6c92f225aaabae26dde8398c051c62f311c254bc8f7ac9b476cfabcf"}
Jan 23 07:14:11 crc kubenswrapper[5102]: I0123 07:14:11.479357 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-ring-rebalance-7ctb2"]
Jan 23 07:14:11 crc kubenswrapper[5102]: W0123 07:14:11.484286 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod33b32693_d02a_42ef_b749_3e0b883b3227.slice/crio-293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf WatchSource:0}: Error finding container 293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf: Status 404 returned error can't find the container with id 293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf
Jan 23 07:14:11 crc kubenswrapper[5102]: I0123 07:14:11.867335 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-galera-0"
Jan 23 07:14:11 crc kubenswrapper[5102]: I0123 07:14:11.867380 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-galera-0"
Jan 23 07:14:12 crc kubenswrapper[5102]: I0123 07:14:12.061636 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7ctb2" event={"ID":"33b32693-d02a-42ef-b749-3e0b883b3227","Type":"ContainerStarted","Data":"293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf"}
Jan 23 07:14:12 crc kubenswrapper[5102]: I0123 07:14:12.063657 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerStarted","Data":"4e4445ea0de06f9fa2ec0a6389c9a2952e55e6ae8854f80a494b563a6aab848e"}
Jan 23 07:14:12 crc kubenswrapper[5102]: I0123 07:14:12.084078 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstack-cell1-galera-0" podStartSLOduration=-9223371985.770721 podStartE2EDuration="51.084053988s" podCreationTimestamp="2026-01-23 07:13:21 +0000 UTC" firstStartedPulling="2026-01-23 07:13:24.079074287 +0000 UTC m=+1154.899423262" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:12.08217593 +0000 UTC m=+1202.902524915" watchObservedRunningTime="2026-01-23 07:14:12.084053988 +0000 UTC m=+1202.904402963"
Jan 23 07:14:12 crc kubenswrapper[5102]: I0123 07:14:12.905219 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-galera-0"
Jan 23 07:14:12 crc kubenswrapper[5102]: I0123 07:14:12.998330 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-galera-0"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.330898 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8ed5-account-create-update-rnftj"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.337370 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-rnftj"
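The openstack-cell1-galera-0 record above reports podStartSLOduration=-9223371985.770721: its lastFinishedPulling is the zero time, so the pull-window subtraction saturates at the minimum int64 Duration and the final subtraction wraps around. A minimal Go sketch reproducing the wrapped value (an assumed reading of the logged fields, not the kubelet's own code):

package main

import (
	"fmt"
	"time"
)

func main() {
	var lastPull time.Time // zero value: no finished pull was recorded
	firstPull := time.Date(2026, time.January, 23, 7, 13, 24, 79074287, time.UTC)
	e2e := 51084053988 * time.Nanosecond // 51.084053988s = podStartE2EDuration

	pullWindow := lastPull.Sub(firstPull) // Sub saturates at math.MinInt64 ns
	slo := e2e - pullWindow               // int64 wrap-around on subtraction
	fmt.Println(slo.Seconds())            // ≈ -9.223371985770721e+09, as logged
}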
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.341272 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.356876 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/openstack-cell1-galera-0"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.356924 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/openstack-cell1-galera-0"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.360181 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8ed5-account-create-update-rnftj"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.428232 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfwst\" (UniqueName: \"kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.428692 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.522309 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-create-wlsm2"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.524635 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.530768 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfwst\" (UniqueName: \"kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.530864 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.530936 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-81e9-account-create-update-bjcgb"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.531858 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.532365 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.535895 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.543246 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wlsm2"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.551214 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-81e9-account-create-update-bjcgb"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.557232 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfwst\" (UniqueName: \"kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst\") pod \"keystone-8ed5-account-create-update-rnftj\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.632404 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q9nq9\" (UniqueName: \"kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.632631 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9vtx6\" (UniqueName: \"kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.632664 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.632710 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.680117 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-rnftj"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.730133 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-create-2dcf9"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.731396 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.741169 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9vtx6\" (UniqueName: \"kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.741290 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.741391 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.741610 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q9nq9\" (UniqueName: \"kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.742376 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-create-2dcf9"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.742584 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.743467 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.763833 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-q9nq9\" (UniqueName: \"kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9\") pod \"placement-81e9-account-create-update-bjcgb\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.766290 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9vtx6\" (UniqueName: \"kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6\") pod \"placement-db-create-wlsm2\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.843608 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.843704 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4vfrc\" (UniqueName: \"kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.847027 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wlsm2"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.861749 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-bjcgb"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.866685 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-b741-account-create-update-vfjtx"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.868060 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.873626 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-db-secret"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.896437 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b741-account-create-update-vfjtx"]
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.945474 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4vfrc\" (UniqueName: \"kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.945672 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cvfzx\" (UniqueName: \"kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.945758 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.945838 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.946660 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:13 crc kubenswrapper[5102]: I0123 07:14:13.981282 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4vfrc\" (UniqueName: \"kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc\") pod \"glance-db-create-2dcf9\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.047775 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cvfzx\" (UniqueName: \"kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.047894 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.048689 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.066080 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2dcf9"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.080607 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cvfzx\" (UniqueName: \"kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx\") pod \"glance-b741-account-create-update-vfjtx\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.198188 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b741-account-create-update-vfjtx"
Jan 23 07:14:14 crc kubenswrapper[5102]: I0123 07:14:14.762334 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0"
Jan 23 07:14:14 crc kubenswrapper[5102]: E0123 07:14:14.762529 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found
Jan 23 07:14:14 crc kubenswrapper[5102]: E0123 07:14:14.762565 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found
Jan 23 07:14:14 crc kubenswrapper[5102]: E0123 07:14:14.762617 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:22.762598269 +0000 UTC m=+1213.582947244 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
Jan 23 07:14:15 crc kubenswrapper[5102]: E0123 07:14:15.123580 5102 upgradeaware.go:427] Error proxying data from client to backend: readfrom tcp 38.102.83.195:33180->38.102.83.195:38563: write tcp 38.102.83.195:33180->38.102.83.195:38563: write: broken pipe
Jan 23 07:14:16 crc kubenswrapper[5102]: I0123 07:14:16.163881 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj"
Jan 23 07:14:16 crc kubenswrapper[5102]: I0123 07:14:16.255335 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"]
Jan 23 07:14:16 crc kubenswrapper[5102]: I0123 07:14:16.255987 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="dnsmasq-dns" containerID="cri-o://31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2" gracePeriod=10
Jan 23 07:14:16 crc kubenswrapper[5102]: I0123 07:14:16.768702 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:14:16 crc kubenswrapper[5102]: I0123 07:14:16.768771 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:14:17 crc kubenswrapper[5102]: I0123 07:14:17.575748 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-northd-0"
Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.126315 5102 generic.go:334] "Generic (PLEG): container finished" podID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerID="31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2" exitCode=0
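The machine-config-daemon liveness failure above is an ordinary HTTP GET that cannot connect. An equivalent manual check against the same endpoint, as a sketch (URL copied from the log; it would need to run on the node itself):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Same endpoint the kubelet's liveness probe hits.
	resp, err := http.Get("http://127.0.0.1:8798/health")
	if err != nil {
		fmt.Println("probe failed:", err) // e.g. connect: connection refused
		return
	}
	defer resp.Body.Close()
	fmt.Println("probe status:", resp.Status)
}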
containerID="31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2" exitCode=0 Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.126401 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" event={"ID":"7ea1bfb4-980f-4772-ad83-cfc4e09b773a","Type":"ContainerDied","Data":"31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2"} Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.406115 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.555164 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc\") pod \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.555238 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config\") pod \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.555357 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb\") pod \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.555396 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjg4h\" (UniqueName: \"kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h\") pod \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.555436 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb\") pod \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\" (UID: \"7ea1bfb4-980f-4772-ad83-cfc4e09b773a\") " Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.567781 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h" (OuterVolumeSpecName: "kube-api-access-jjg4h") pod "7ea1bfb4-980f-4772-ad83-cfc4e09b773a" (UID: "7ea1bfb4-980f-4772-ad83-cfc4e09b773a"). InnerVolumeSpecName "kube-api-access-jjg4h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.609143 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "7ea1bfb4-980f-4772-ad83-cfc4e09b773a" (UID: "7ea1bfb4-980f-4772-ad83-cfc4e09b773a"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.614458 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "7ea1bfb4-980f-4772-ad83-cfc4e09b773a" (UID: "7ea1bfb4-980f-4772-ad83-cfc4e09b773a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.620029 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "7ea1bfb4-980f-4772-ad83-cfc4e09b773a" (UID: "7ea1bfb4-980f-4772-ad83-cfc4e09b773a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.629447 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config" (OuterVolumeSpecName: "config") pod "7ea1bfb4-980f-4772-ad83-cfc4e09b773a" (UID: "7ea1bfb4-980f-4772-ad83-cfc4e09b773a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664138 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8ed5-account-create-update-rnftj"] Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664377 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664412 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjg4h\" (UniqueName: \"kubernetes.io/projected/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-kube-api-access-jjg4h\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664426 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664436 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.664447 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7ea1bfb4-980f-4772-ad83-cfc4e09b773a-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:18 crc kubenswrapper[5102]: W0123 07:14:18.665972 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3873fe98_794e_42f7_9b3a_b4d9e8ab64f7.slice/crio-a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f WatchSource:0}: Error finding container a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f: Status 404 returned error can't find the container with id a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.770342 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/glance-db-create-2dcf9"] Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.778041 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-b741-account-create-update-vfjtx"] Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.945064 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-81e9-account-create-update-bjcgb"] Jan 23 07:14:18 crc kubenswrapper[5102]: I0123 07:14:18.963211 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-create-wlsm2"] Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.147205 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81e9-account-create-update-bjcgb" event={"ID":"ee7792b2-73da-4fe7-b0c2-95ab1c382b51","Type":"ContainerStarted","Data":"74b19331d91f2bc63c87e42420f1bd30a62bfe194c193f14db9f543faf5168c7"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.150316 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" event={"ID":"7ea1bfb4-980f-4772-ad83-cfc4e09b773a","Type":"ContainerDied","Data":"f419f25be0f805425267a3cca94f479faeb4be366c501fc971663a360adb22f6"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.150780 5102 scope.go:117] "RemoveContainer" containerID="31dbf721e727dc7a8177c0866fc81854eef01e8b2f0848c0d9447f365fb555f2" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.150385 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-586b989cdc-qbtrx" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.154257 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2dcf9" event={"ID":"8b567354-3e7f-446b-af77-f81ae5de44ce","Type":"ContainerStarted","Data":"ed0bb614f41e42220655c7ef417f5313bc2d9845fc86d13a8a928923ae9b75a7"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.154429 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2dcf9" event={"ID":"8b567354-3e7f-446b-af77-f81ae5de44ce","Type":"ContainerStarted","Data":"25a1b319ffc5dcc4f38ac2ec15cc65de810ea7b3b1c3cfb83f4e4a05989df9b1"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.157875 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7ctb2" event={"ID":"33b32693-d02a-42ef-b749-3e0b883b3227","Type":"ContainerStarted","Data":"d8ae3da02197050db71977dd19c8a0685c7287d6b6e0cba5a68823e64d602cd4"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.160094 5102 generic.go:334] "Generic (PLEG): container finished" podID="3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" containerID="23be25efbbbb49cf5b313ae643ee992afe2cc9a873f12cb787a10ffbaa4b1f4e" exitCode=0 Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.160175 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8ed5-account-create-update-rnftj" event={"ID":"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7","Type":"ContainerDied","Data":"23be25efbbbb49cf5b313ae643ee992afe2cc9a873f12cb787a10ffbaa4b1f4e"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.160204 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8ed5-account-create-update-rnftj" event={"ID":"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7","Type":"ContainerStarted","Data":"a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.161875 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/placement-db-create-wlsm2" event={"ID":"13e397d3-faf3-41e3-b040-46b90a3e7c2c","Type":"ContainerStarted","Data":"95c1fd1e1e18df0f7f151d7d5d61b655edc2aebd6d41cf08b87c7d5ccbb77dae"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.164058 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b741-account-create-update-vfjtx" event={"ID":"1b3f08fd-2ae7-419b-8b88-637138d66302","Type":"ContainerStarted","Data":"089b7420219fcf0447d5dd76ce8a11dd0f0ad218a2cc2cfdd4db246cd9bf5143"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.164106 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b741-account-create-update-vfjtx" event={"ID":"1b3f08fd-2ae7-419b-8b88-637138d66302","Type":"ContainerStarted","Data":"d77582e149aa7bcfd8085b38c19319bd66d9c54fda420c5890f1f5228169fa1f"} Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.201475 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-create-2dcf9" podStartSLOduration=6.201431053 podStartE2EDuration="6.201431053s" podCreationTimestamp="2026-01-23 07:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:19.174556255 +0000 UTC m=+1209.994905230" watchObservedRunningTime="2026-01-23 07:14:19.201431053 +0000 UTC m=+1210.021780028" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.205481 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-ring-rebalance-7ctb2" podStartSLOduration=2.526986204 podStartE2EDuration="9.205462117s" podCreationTimestamp="2026-01-23 07:14:10 +0000 UTC" firstStartedPulling="2026-01-23 07:14:11.486825811 +0000 UTC m=+1202.307174786" lastFinishedPulling="2026-01-23 07:14:18.165301724 +0000 UTC m=+1208.985650699" observedRunningTime="2026-01-23 07:14:19.197101132 +0000 UTC m=+1210.017450117" watchObservedRunningTime="2026-01-23 07:14:19.205462117 +0000 UTC m=+1210.025811082" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.216165 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-b741-account-create-update-vfjtx" podStartSLOduration=6.216141012 podStartE2EDuration="6.216141012s" podCreationTimestamp="2026-01-23 07:14:13 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:19.214267295 +0000 UTC m=+1210.034616290" watchObservedRunningTime="2026-01-23 07:14:19.216141012 +0000 UTC m=+1210.036489997" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.290794 5102 scope.go:117] "RemoveContainer" containerID="f1b5f50db38825ac5b1b1bc777c4771251109d8a6fab61fc94c81cd5eb334884" Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.297472 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"] Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.304371 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-586b989cdc-qbtrx"] Jan 23 07:14:19 crc kubenswrapper[5102]: I0123 07:14:19.609913 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" path="/var/lib/kubelet/pods/7ea1bfb4-980f-4772-ad83-cfc4e09b773a/volumes" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.174957 5102 generic.go:334] "Generic (PLEG): container finished" podID="ee7792b2-73da-4fe7-b0c2-95ab1c382b51" 
containerID="972f9a84e458c913ea4e1bc1b2c2dfc03afd961f0eabaeb65d9865461e35e88c" exitCode=0 Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.175076 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81e9-account-create-update-bjcgb" event={"ID":"ee7792b2-73da-4fe7-b0c2-95ab1c382b51","Type":"ContainerDied","Data":"972f9a84e458c913ea4e1bc1b2c2dfc03afd961f0eabaeb65d9865461e35e88c"} Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.179678 5102 generic.go:334] "Generic (PLEG): container finished" podID="8b567354-3e7f-446b-af77-f81ae5de44ce" containerID="ed0bb614f41e42220655c7ef417f5313bc2d9845fc86d13a8a928923ae9b75a7" exitCode=0 Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.179737 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2dcf9" event={"ID":"8b567354-3e7f-446b-af77-f81ae5de44ce","Type":"ContainerDied","Data":"ed0bb614f41e42220655c7ef417f5313bc2d9845fc86d13a8a928923ae9b75a7"} Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.181900 5102 generic.go:334] "Generic (PLEG): container finished" podID="13e397d3-faf3-41e3-b040-46b90a3e7c2c" containerID="0296f13483bdd5b016788ffbae8298e92f4031ce3d313e2b7027e65f01e58acd" exitCode=0 Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.181927 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wlsm2" event={"ID":"13e397d3-faf3-41e3-b040-46b90a3e7c2c","Type":"ContainerDied","Data":"0296f13483bdd5b016788ffbae8298e92f4031ce3d313e2b7027e65f01e58acd"} Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.183904 5102 generic.go:334] "Generic (PLEG): container finished" podID="1b3f08fd-2ae7-419b-8b88-637138d66302" containerID="089b7420219fcf0447d5dd76ce8a11dd0f0ad218a2cc2cfdd4db246cd9bf5143" exitCode=0 Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.183982 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b741-account-create-update-vfjtx" event={"ID":"1b3f08fd-2ae7-419b-8b88-637138d66302","Type":"ContainerDied","Data":"089b7420219fcf0447d5dd76ce8a11dd0f0ad218a2cc2cfdd4db246cd9bf5143"} Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.442382 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-w8pv4"] Jan 23 07:14:20 crc kubenswrapper[5102]: E0123 07:14:20.442926 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="init" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.442955 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="init" Jan 23 07:14:20 crc kubenswrapper[5102]: E0123 07:14:20.443014 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="dnsmasq-dns" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.443027 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="dnsmasq-dns" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.443237 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="7ea1bfb4-980f-4772-ad83-cfc4e09b773a" containerName="dnsmasq-dns" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.444141 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.448346 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.454857 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-w8pv4"] Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.506288 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.506478 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxr8s\" (UniqueName: \"kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.561140 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-rnftj" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.608056 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.608559 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxr8s\" (UniqueName: \"kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.608954 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.628263 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxr8s\" (UniqueName: \"kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s\") pod \"root-account-create-update-w8pv4\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.709899 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wfwst\" (UniqueName: \"kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst\") pod \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.709972 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts\") pod \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\" (UID: \"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7\") " Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.711026 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" (UID: "3873fe98-794e-42f7-9b3a-b4d9e8ab64f7"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.715929 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst" (OuterVolumeSpecName: "kube-api-access-wfwst") pod "3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" (UID: "3873fe98-794e-42f7-9b3a-b4d9e8ab64f7"). InnerVolumeSpecName "kube-api-access-wfwst". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.769997 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.814307 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:20 crc kubenswrapper[5102]: I0123 07:14:20.814358 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wfwst\" (UniqueName: \"kubernetes.io/projected/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7-kube-api-access-wfwst\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.215257 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-8ed5-account-create-update-rnftj" event={"ID":"3873fe98-794e-42f7-9b3a-b4d9e8ab64f7","Type":"ContainerDied","Data":"a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f"} Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.215332 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a08cd1c57e2752981e4664fddf29ad280e9126d952a4e99c633b71eaa0a1b28f" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.215482 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-rnftj" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.267726 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-w8pv4"] Jan 23 07:14:21 crc kubenswrapper[5102]: W0123 07:14:21.280358 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod82a3dd9f_b277_4e77_9f83_53337cb91b0c.slice/crio-64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0 WatchSource:0}: Error finding container 64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0: Status 404 returned error can't find the container with id 64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0 Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.544988 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/openstack-cell1-galera-0" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.623080 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2dcf9" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.693567 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/openstack-cell1-galera-0" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.729809 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4vfrc\" (UniqueName: \"kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc\") pod \"8b567354-3e7f-446b-af77-f81ae5de44ce\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.730607 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts\") pod \"8b567354-3e7f-446b-af77-f81ae5de44ce\" (UID: \"8b567354-3e7f-446b-af77-f81ae5de44ce\") " Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.733712 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "8b567354-3e7f-446b-af77-f81ae5de44ce" (UID: "8b567354-3e7f-446b-af77-f81ae5de44ce"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.742370 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc" (OuterVolumeSpecName: "kube-api-access-4vfrc") pod "8b567354-3e7f-446b-af77-f81ae5de44ce" (UID: "8b567354-3e7f-446b-af77-f81ae5de44ce"). InnerVolumeSpecName "kube-api-access-4vfrc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.811748 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-81e9-account-create-update-bjcgb" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.833405 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4vfrc\" (UniqueName: \"kubernetes.io/projected/8b567354-3e7f-446b-af77-f81ae5de44ce-kube-api-access-4vfrc\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.833446 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/8b567354-3e7f-446b-af77-f81ae5de44ce-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.837009 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b741-account-create-update-vfjtx" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.849673 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wlsm2" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.934174 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q9nq9\" (UniqueName: \"kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9\") pod \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.934413 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts\") pod \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\" (UID: \"ee7792b2-73da-4fe7-b0c2-95ab1c382b51\") " Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.935050 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ee7792b2-73da-4fe7-b0c2-95ab1c382b51" (UID: "ee7792b2-73da-4fe7-b0c2-95ab1c382b51"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:21 crc kubenswrapper[5102]: I0123 07:14:21.938385 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9" (OuterVolumeSpecName: "kube-api-access-q9nq9") pod "ee7792b2-73da-4fe7-b0c2-95ab1c382b51" (UID: "ee7792b2-73da-4fe7-b0c2-95ab1c382b51"). InnerVolumeSpecName "kube-api-access-q9nq9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.035753 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts\") pod \"1b3f08fd-2ae7-419b-8b88-637138d66302\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.036145 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts\") pod \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.036323 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9vtx6\" (UniqueName: \"kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6\") pod \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\" (UID: \"13e397d3-faf3-41e3-b040-46b90a3e7c2c\") " Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.036491 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cvfzx\" (UniqueName: \"kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx\") pod \"1b3f08fd-2ae7-419b-8b88-637138d66302\" (UID: \"1b3f08fd-2ae7-419b-8b88-637138d66302\") " Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.036527 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1b3f08fd-2ae7-419b-8b88-637138d66302" (UID: "1b3f08fd-2ae7-419b-8b88-637138d66302"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.036647 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "13e397d3-faf3-41e3-b040-46b90a3e7c2c" (UID: "13e397d3-faf3-41e3-b040-46b90a3e7c2c"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.037694 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/13e397d3-faf3-41e3-b040-46b90a3e7c2c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.037749 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.037763 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q9nq9\" (UniqueName: \"kubernetes.io/projected/ee7792b2-73da-4fe7-b0c2-95ab1c382b51-kube-api-access-q9nq9\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.037779 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1b3f08fd-2ae7-419b-8b88-637138d66302-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.040084 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx" (OuterVolumeSpecName: "kube-api-access-cvfzx") pod "1b3f08fd-2ae7-419b-8b88-637138d66302" (UID: "1b3f08fd-2ae7-419b-8b88-637138d66302"). InnerVolumeSpecName "kube-api-access-cvfzx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.040509 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6" (OuterVolumeSpecName: "kube-api-access-9vtx6") pod "13e397d3-faf3-41e3-b040-46b90a3e7c2c" (UID: "13e397d3-faf3-41e3-b040-46b90a3e7c2c"). InnerVolumeSpecName "kube-api-access-9vtx6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.139418 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9vtx6\" (UniqueName: \"kubernetes.io/projected/13e397d3-faf3-41e3-b040-46b90a3e7c2c-kube-api-access-9vtx6\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.139474 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cvfzx\" (UniqueName: \"kubernetes.io/projected/1b3f08fd-2ae7-419b-8b88-637138d66302-kube-api-access-cvfzx\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.224798 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-b741-account-create-update-vfjtx" event={"ID":"1b3f08fd-2ae7-419b-8b88-637138d66302","Type":"ContainerDied","Data":"d77582e149aa7bcfd8085b38c19319bd66d9c54fda420c5890f1f5228169fa1f"} Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.224889 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d77582e149aa7bcfd8085b38c19319bd66d9c54fda420c5890f1f5228169fa1f" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.225002 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-b741-account-create-update-vfjtx" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.229614 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-81e9-account-create-update-bjcgb" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.230773 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81e9-account-create-update-bjcgb" event={"ID":"ee7792b2-73da-4fe7-b0c2-95ab1c382b51","Type":"ContainerDied","Data":"74b19331d91f2bc63c87e42420f1bd30a62bfe194c193f14db9f543faf5168c7"} Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.230819 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="74b19331d91f2bc63c87e42420f1bd30a62bfe194c193f14db9f543faf5168c7" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.242029 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-create-2dcf9" event={"ID":"8b567354-3e7f-446b-af77-f81ae5de44ce","Type":"ContainerDied","Data":"25a1b319ffc5dcc4f38ac2ec15cc65de810ea7b3b1c3cfb83f4e4a05989df9b1"} Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.242082 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="25a1b319ffc5dcc4f38ac2ec15cc65de810ea7b3b1c3cfb83f4e4a05989df9b1" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.242165 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-create-2dcf9" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.244706 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-w8pv4" event={"ID":"82a3dd9f-b277-4e77-9f83-53337cb91b0c","Type":"ContainerStarted","Data":"64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0"} Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.260351 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-create-wlsm2" event={"ID":"13e397d3-faf3-41e3-b040-46b90a3e7c2c","Type":"ContainerDied","Data":"95c1fd1e1e18df0f7f151d7d5d61b655edc2aebd6d41cf08b87c7d5ccbb77dae"} Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.260398 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="95c1fd1e1e18df0f7f151d7d5d61b655edc2aebd6d41cf08b87c7d5ccbb77dae" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.261711 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-create-wlsm2" Jan 23 07:14:22 crc kubenswrapper[5102]: I0123 07:14:22.856846 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0" Jan 23 07:14:22 crc kubenswrapper[5102]: E0123 07:14:22.857114 5102 projected.go:288] Couldn't get configMap openstack/swift-ring-files: configmap "swift-ring-files" not found Jan 23 07:14:22 crc kubenswrapper[5102]: E0123 07:14:22.857150 5102 projected.go:194] Error preparing data for projected volume etc-swift for pod openstack/swift-storage-0: configmap "swift-ring-files" not found Jan 23 07:14:22 crc kubenswrapper[5102]: E0123 07:14:22.857217 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:38.857196489 +0000 UTC m=+1229.677545474 (durationBeforeRetry 16s). 
Jan 23 07:14:22 crc kubenswrapper[5102]: E0123 07:14:22.857217 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift podName:20474222-aadd-44c0-8c4e-f0b4bd0147c5 nodeName:}" failed. No retries permitted until 2026-01-23 07:14:38.857196489 +0000 UTC m=+1229.677545474 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "etc-swift" (UniqueName: "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift") pod "swift-storage-0" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5") : configmap "swift-ring-files" not found
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.098503 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-create-2t4g9"]
Jan 23 07:14:23 crc kubenswrapper[5102]: E0123 07:14:23.099114 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8b567354-3e7f-446b-af77-f81ae5de44ce" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099147 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="8b567354-3e7f-446b-af77-f81ae5de44ce" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: E0123 07:14:23.099175 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1b3f08fd-2ae7-419b-8b88-637138d66302" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099190 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1b3f08fd-2ae7-419b-8b88-637138d66302" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: E0123 07:14:23.099221 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099238 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: E0123 07:14:23.099257 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13e397d3-faf3-41e3-b040-46b90a3e7c2c" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099270 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="13e397d3-faf3-41e3-b040-46b90a3e7c2c" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: E0123 07:14:23.099292 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ee7792b2-73da-4fe7-b0c2-95ab1c382b51" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099304 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ee7792b2-73da-4fe7-b0c2-95ab1c382b51" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099649 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" containerName="mariadb-account-create-update"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099679 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="13e397d3-faf3-41e3-b040-46b90a3e7c2c" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099704 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="8b567354-3e7f-446b-af77-f81ae5de44ce" containerName="mariadb-database-create"
Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.099730 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1b3f08fd-2ae7-419b-8b88-637138d66302" containerName="mariadb-account-create-update"
containerName="mariadb-account-create-update" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.100628 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.114289 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2t4g9"] Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.263815 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-drzm9\" (UniqueName: \"kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9\") pod \"keystone-db-create-2t4g9\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.263969 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts\") pod \"keystone-db-create-2t4g9\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.268855 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-w8pv4" event={"ID":"82a3dd9f-b277-4e77-9f83-53337cb91b0c","Type":"ContainerStarted","Data":"32798ef7ae044e71d1056dc26d7107caa27657b6f9372f44acb759953237e2d4"} Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.291469 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/root-account-create-update-w8pv4" podStartSLOduration=3.29143834 podStartE2EDuration="3.29143834s" podCreationTimestamp="2026-01-23 07:14:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:23.287929694 +0000 UTC m=+1214.108278689" watchObservedRunningTime="2026-01-23 07:14:23.29143834 +0000 UTC m=+1214.111787315" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.365439 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-drzm9\" (UniqueName: \"kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9\") pod \"keystone-db-create-2t4g9\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.365587 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts\") pod \"keystone-db-create-2t4g9\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.366998 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts\") pod \"keystone-db-create-2t4g9\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.394879 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-drzm9\" (UniqueName: \"kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9\") pod \"keystone-db-create-2t4g9\" (UID: 
\"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.462892 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:23 crc kubenswrapper[5102]: I0123 07:14:23.929145 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-create-2t4g9"] Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.174516 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-db-sync-gp42d"] Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.175981 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.179056 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-config-data" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.179526 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-vxrkv" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.187243 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-gp42d"] Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.277931 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2t4g9" event={"ID":"ebe64681-d043-4b12-b1ae-2306ef0e294f","Type":"ContainerStarted","Data":"4d9a56b175cc4185c212114ef997d2459b218cdbc2075a01ce27a964d06ddb8e"} Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.278003 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2t4g9" event={"ID":"ebe64681-d043-4b12-b1ae-2306ef0e294f","Type":"ContainerStarted","Data":"1d7c1f685e3749246554d36dd265432185f4da7bb7d23d6548707c16850e8e3c"} Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.279666 5102 generic.go:334] "Generic (PLEG): container finished" podID="82a3dd9f-b277-4e77-9f83-53337cb91b0c" containerID="32798ef7ae044e71d1056dc26d7107caa27657b6f9372f44acb759953237e2d4" exitCode=0 Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.279708 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-w8pv4" event={"ID":"82a3dd9f-b277-4e77-9f83-53337cb91b0c","Type":"ContainerDied","Data":"32798ef7ae044e71d1056dc26d7107caa27657b6f9372f44acb759953237e2d4"} Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.287129 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.287176 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.287883 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle\") pod \"glance-db-sync-gp42d\" (UID: 
\"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.287993 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pfqjn\" (UniqueName: \"kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.305015 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-create-2t4g9" podStartSLOduration=1.303047743 podStartE2EDuration="1.303047743s" podCreationTimestamp="2026-01-23 07:14:23 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:24.295861464 +0000 UTC m=+1215.116210439" watchObservedRunningTime="2026-01-23 07:14:24.303047743 +0000 UTC m=+1215.123396718" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.389448 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.389513 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pfqjn\" (UniqueName: \"kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.389615 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.389642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.397187 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.397282 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.397765 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle\") pod 
\"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.423097 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pfqjn\" (UniqueName: \"kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn\") pod \"glance-db-sync-gp42d\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") " pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:24 crc kubenswrapper[5102]: I0123 07:14:24.521729 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gp42d" Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.064440 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-db-sync-gp42d"] Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.291639 5102 generic.go:334] "Generic (PLEG): container finished" podID="ebe64681-d043-4b12-b1ae-2306ef0e294f" containerID="4d9a56b175cc4185c212114ef997d2459b218cdbc2075a01ce27a964d06ddb8e" exitCode=0 Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.291702 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2t4g9" event={"ID":"ebe64681-d043-4b12-b1ae-2306ef0e294f","Type":"ContainerDied","Data":"4d9a56b175cc4185c212114ef997d2459b218cdbc2075a01ce27a964d06ddb8e"} Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.293468 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gp42d" event={"ID":"d17cb94c-536a-4a89-aac5-802cc52ae2ce","Type":"ContainerStarted","Data":"fe3d6a54905aecf9cb4bd29261751e97537bac39fdc42198757d2bdb3116e65c"} Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.705955 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.815900 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts\") pod \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.816001 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxr8s\" (UniqueName: \"kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s\") pod \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\" (UID: \"82a3dd9f-b277-4e77-9f83-53337cb91b0c\") " Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.817297 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "82a3dd9f-b277-4e77-9f83-53337cb91b0c" (UID: "82a3dd9f-b277-4e77-9f83-53337cb91b0c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.824937 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s" (OuterVolumeSpecName: "kube-api-access-nxr8s") pod "82a3dd9f-b277-4e77-9f83-53337cb91b0c" (UID: "82a3dd9f-b277-4e77-9f83-53337cb91b0c"). InnerVolumeSpecName "kube-api-access-nxr8s". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.920581 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/82a3dd9f-b277-4e77-9f83-53337cb91b0c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:25 crc kubenswrapper[5102]: I0123 07:14:25.920630 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxr8s\" (UniqueName: \"kubernetes.io/projected/82a3dd9f-b277-4e77-9f83-53337cb91b0c-kube-api-access-nxr8s\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.306083 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-w8pv4" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.309870 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-w8pv4" event={"ID":"82a3dd9f-b277-4e77-9f83-53337cb91b0c","Type":"ContainerDied","Data":"64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0"} Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.309921 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="64d57af49f37dce6380dd89bc69cb755f92e04a7d49c575d865ea9050986c8d0" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.675227 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.840118 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-drzm9\" (UniqueName: \"kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9\") pod \"ebe64681-d043-4b12-b1ae-2306ef0e294f\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.840751 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts\") pod \"ebe64681-d043-4b12-b1ae-2306ef0e294f\" (UID: \"ebe64681-d043-4b12-b1ae-2306ef0e294f\") " Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.841606 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "ebe64681-d043-4b12-b1ae-2306ef0e294f" (UID: "ebe64681-d043-4b12-b1ae-2306ef0e294f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.850001 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9" (OuterVolumeSpecName: "kube-api-access-drzm9") pod "ebe64681-d043-4b12-b1ae-2306ef0e294f" (UID: "ebe64681-d043-4b12-b1ae-2306ef0e294f"). InnerVolumeSpecName "kube-api-access-drzm9". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.866705 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-w8pv4"] Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.876516 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-w8pv4"] Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.942468 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-drzm9\" (UniqueName: \"kubernetes.io/projected/ebe64681-d043-4b12-b1ae-2306ef0e294f-kube-api-access-drzm9\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:26 crc kubenswrapper[5102]: I0123 07:14:26.942508 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/ebe64681-d043-4b12-b1ae-2306ef0e294f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:27 crc kubenswrapper[5102]: I0123 07:14:27.314892 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-create-2t4g9" event={"ID":"ebe64681-d043-4b12-b1ae-2306ef0e294f","Type":"ContainerDied","Data":"1d7c1f685e3749246554d36dd265432185f4da7bb7d23d6548707c16850e8e3c"} Jan 23 07:14:27 crc kubenswrapper[5102]: I0123 07:14:27.315019 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1d7c1f685e3749246554d36dd265432185f4da7bb7d23d6548707c16850e8e3c" Jan 23 07:14:27 crc kubenswrapper[5102]: I0123 07:14:27.315029 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-create-2t4g9" Jan 23 07:14:27 crc kubenswrapper[5102]: I0123 07:14:27.613119 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="82a3dd9f-b277-4e77-9f83-53337cb91b0c" path="/var/lib/kubelet/pods/82a3dd9f-b277-4e77-9f83-53337cb91b0c/volumes" Jan 23 07:14:29 crc kubenswrapper[5102]: I0123 07:14:29.332703 5102 generic.go:334] "Generic (PLEG): container finished" podID="33b32693-d02a-42ef-b749-3e0b883b3227" containerID="d8ae3da02197050db71977dd19c8a0685c7287d6b6e0cba5a68823e64d602cd4" exitCode=0 Jan 23 07:14:29 crc kubenswrapper[5102]: I0123 07:14:29.332778 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7ctb2" event={"ID":"33b32693-d02a-42ef-b749-3e0b883b3227","Type":"ContainerDied","Data":"d8ae3da02197050db71977dd19c8a0685c7287d6b6e0cba5a68823e64d602cd4"} Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.480579 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-n72gd"] Jan 23 07:14:30 crc kubenswrapper[5102]: E0123 07:14:30.480975 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="82a3dd9f-b277-4e77-9f83-53337cb91b0c" containerName="mariadb-account-create-update" Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.480991 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="82a3dd9f-b277-4e77-9f83-53337cb91b0c" containerName="mariadb-account-create-update" Jan 23 07:14:30 crc kubenswrapper[5102]: E0123 07:14:30.480999 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ebe64681-d043-4b12-b1ae-2306ef0e294f" containerName="mariadb-database-create" Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.481006 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ebe64681-d043-4b12-b1ae-2306ef0e294f" containerName="mariadb-database-create" Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.481152 5102 
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.481152 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="82a3dd9f-b277-4e77-9f83-53337cb91b0c" containerName="mariadb-account-create-update"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.481161 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ebe64681-d043-4b12-b1ae-2306ef0e294f" containerName="mariadb-database-create"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.481714 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.486388 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.492228 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-n72gd"]
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.636660 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bg88j\" (UniqueName: \"kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.636784 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.738952 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.739887 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.740211 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bg88j\" (UniqueName: \"kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.770968 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bg88j\" (UniqueName: \"kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j\") pod \"root-account-create-update-n72gd\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.814818 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n72gd"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.836142 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-rkvv7" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" probeResult="failure" output=<
Jan 23 07:14:30 crc kubenswrapper[5102]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status
Jan 23 07:14:30 crc kubenswrapper[5102]: >
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.839145 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-h9gtx"
Jan 23 07:14:30 crc kubenswrapper[5102]: I0123 07:14:30.851326 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-ovs-h9gtx"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.087633 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ovn-controller-rkvv7-config-ddcjk"]
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.092397 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7-config-ddcjk"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.096320 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"ovncontroller-extra-scripts"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.106724 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rkvv7-config-ddcjk"]
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.251824 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rpmrr\" (UniqueName: \"kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.251870 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.251917 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk"
Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.251942 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk"
\"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.252176 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.353200 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rpmrr\" (UniqueName: \"kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.353246 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.353308 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.353327 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354072 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354462 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354662 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354701 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn\") pod 
\"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354792 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.354837 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.355771 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.377176 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rpmrr\" (UniqueName: \"kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr\") pod \"ovn-controller-rkvv7-config-ddcjk\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:31 crc kubenswrapper[5102]: I0123 07:14:31.413965 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:32 crc kubenswrapper[5102]: I0123 07:14:32.358990 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerID="2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5" exitCode=0 Jan 23 07:14:32 crc kubenswrapper[5102]: I0123 07:14:32.359196 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerDied","Data":"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5"} Jan 23 07:14:33 crc kubenswrapper[5102]: I0123 07:14:33.381516 5102 generic.go:334] "Generic (PLEG): container finished" podID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerID="dac1e501d0f018ad7a331fa5911c1ac5b2f12ea3b755131154923e975ef2f708" exitCode=0 Jan 23 07:14:33 crc kubenswrapper[5102]: I0123 07:14:33.381588 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerDied","Data":"dac1e501d0f018ad7a331fa5911c1ac5b2f12ea3b755131154923e975ef2f708"} Jan 23 07:14:35 crc kubenswrapper[5102]: I0123 07:14:35.822744 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-rkvv7" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" probeResult="failure" output=< Jan 23 07:14:35 crc kubenswrapper[5102]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 23 07:14:35 crc kubenswrapper[5102]: > Jan 23 07:14:38 crc kubenswrapper[5102]: I0123 07:14:38.915312 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0" Jan 23 07:14:38 crc kubenswrapper[5102]: I0123 07:14:38.927403 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"swift-storage-0\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " pod="openstack/swift-storage-0" Jan 23 07:14:38 crc kubenswrapper[5102]: I0123 07:14:38.969257 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 23 07:14:40 crc kubenswrapper[5102]: I0123 07:14:40.823336 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ovn-controller-rkvv7" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" probeResult="failure" output=< Jan 23 07:14:40 crc kubenswrapper[5102]: ERROR - ovn-controller connection status is 'not connected', expecting 'connected' status Jan 23 07:14:40 crc kubenswrapper[5102]: > Jan 23 07:14:42 crc kubenswrapper[5102]: E0123 07:14:42.606271 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f" Jan 23 07:14:42 crc kubenswrapper[5102]: E0123 07:14:42.606430 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:glance-db-sync,Image:quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:true,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/glance/glance.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-pfqjn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[MKNOD],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*42415,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:*42415,ProcMount:nil,WindowsOptions:nil,SeccompProfile:&SeccompProfile{Type:RuntimeDefault,LocalhostProfile:nil,},AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod glance-db-sync-gp42d_openstack(d17cb94c-536a-4a89-aac5-802cc52ae2ce): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:14:42 crc kubenswrapper[5102]: E0123 07:14:42.608112 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/glance-db-sync-gp42d" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" Jan 23 
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.762352 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-ring-rebalance-7ctb2"
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904073 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904514 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cw42r\" (UniqueName: \"kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904556 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904581 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904634 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904661 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.904694 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf\") pod \"33b32693-d02a-42ef-b749-3e0b883b3227\" (UID: \"33b32693-d02a-42ef-b749-3e0b883b3227\") "
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.911786 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.912353 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices" (OuterVolumeSpecName: "ring-data-devices") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "ring-data-devices". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.917289 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf" (OuterVolumeSpecName: "dispersionconf") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "dispersionconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.920891 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r" (OuterVolumeSpecName: "kube-api-access-cw42r") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "kube-api-access-cw42r". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.959778 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.961964 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf" (OuterVolumeSpecName: "swiftconf") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "swiftconf". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:14:42 crc kubenswrapper[5102]: I0123 07:14:42.976812 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts" (OuterVolumeSpecName: "scripts") pod "33b32693-d02a-42ef-b749-3e0b883b3227" (UID: "33b32693-d02a-42ef-b749-3e0b883b3227"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.006979 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cw42r\" (UniqueName: \"kubernetes.io/projected/33b32693-d02a-42ef-b749-3e0b883b3227-kube-api-access-cw42r\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007059 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007114 5102 reconciler_common.go:293] "Volume detached for volume \"swiftconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-swiftconf\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007124 5102 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/empty-dir/33b32693-d02a-42ef-b749-3e0b883b3227-etc-swift\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007132 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007141 5102 reconciler_common.go:293] "Volume detached for volume \"dispersionconf\" (UniqueName: \"kubernetes.io/secret/33b32693-d02a-42ef-b749-3e0b883b3227-dispersionconf\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.007149 5102 reconciler_common.go:293] "Volume detached for volume \"ring-data-devices\" (UniqueName: \"kubernetes.io/configmap/33b32693-d02a-42ef-b749-3e0b883b3227-ring-data-devices\") on node \"crc\" DevicePath \"\""
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.344152 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ovn-controller-rkvv7-config-ddcjk"]
Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.391707 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-n72gd"]
Jan 23 07:14:43 crc kubenswrapper[5102]: W0123 07:14:43.417237 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podabe15b3d_f1e3_41a6_bfe7_b52b87667411.slice/crio-bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686 WatchSource:0}: Error finding container bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686: Status 404 returned error can't find the container with id bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686
Need to start a new one" pod="openstack/swift-ring-rebalance-7ctb2" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.476974 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-ring-rebalance-7ctb2" event={"ID":"33b32693-d02a-42ef-b749-3e0b883b3227","Type":"ContainerDied","Data":"293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf"} Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.477407 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="293e41b8cff01e8c51565671ed3f34942e5e6dfd27725a2c4d479633793451cf" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.478739 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n72gd" event={"ID":"abe15b3d-f1e3-41a6-bfe7-b52b87667411","Type":"ContainerStarted","Data":"bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686"} Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.480574 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerStarted","Data":"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec"} Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.480811 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-server-0" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.483876 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerStarted","Data":"33b3d6f15adbf2ba58af4031167e04bf38158518432643d8d72a903641549c7a"} Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.484260 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.494803 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7-config-ddcjk" event={"ID":"78114475-f256-441e-bab8-b37ee48c937c","Type":"ContainerStarted","Data":"0f4272575de5d477a931f1032fa40d84832f71d02ff9c998ae491df0757bf05f"} Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.500006 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-storage-0"] Jan 23 07:14:43 crc kubenswrapper[5102]: E0123 07:14:43.502824 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"glance-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-glance-api@sha256:e4aa4ebbb1e581a12040e9ad2ae2709ac31b5d965bb64fc4252d1028b05c565f\\\"\"" pod="openstack/glance-db-sync-gp42d" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.512433 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/rabbitmq-server-0" podStartSLOduration=48.208449017 podStartE2EDuration="1m25.512414123s" podCreationTimestamp="2026-01-23 07:13:18 +0000 UTC" firstStartedPulling="2026-01-23 07:13:20.773200632 +0000 UTC m=+1151.593549607" lastFinishedPulling="2026-01-23 07:13:58.077165738 +0000 UTC m=+1188.897514713" observedRunningTime="2026-01-23 07:14:43.51161288 +0000 UTC m=+1234.331961855" watchObservedRunningTime="2026-01-23 07:14:43.512414123 +0000 UTC m=+1234.332763098" Jan 23 07:14:43 crc kubenswrapper[5102]: I0123 07:14:43.566755 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openstack/rabbitmq-cell1-server-0" podStartSLOduration=-9223371952.288038 podStartE2EDuration="1m24.566737719s" podCreationTimestamp="2026-01-23 07:13:19 +0000 UTC" firstStartedPulling="2026-01-23 07:13:21.08100111 +0000 UTC m=+1151.901350085" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:43.563001195 +0000 UTC m=+1234.383350180" watchObservedRunningTime="2026-01-23 07:14:43.566737719 +0000 UTC m=+1234.387086684" Jan 23 07:14:44 crc kubenswrapper[5102]: I0123 07:14:44.518272 5102 generic.go:334] "Generic (PLEG): container finished" podID="abe15b3d-f1e3-41a6-bfe7-b52b87667411" containerID="6d46e8fc78e649e7afb857616146a41ab67408309a95afe99332c144e8fb1f6f" exitCode=0 Jan 23 07:14:44 crc kubenswrapper[5102]: I0123 07:14:44.518720 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n72gd" event={"ID":"abe15b3d-f1e3-41a6-bfe7-b52b87667411","Type":"ContainerDied","Data":"6d46e8fc78e649e7afb857616146a41ab67408309a95afe99332c144e8fb1f6f"} Jan 23 07:14:44 crc kubenswrapper[5102]: I0123 07:14:44.522464 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"0dfbd931491f7396bdacb10c215bb3018f09e51cf5fd12f72f6465cdd1e1e8f5"} Jan 23 07:14:44 crc kubenswrapper[5102]: I0123 07:14:44.525126 5102 generic.go:334] "Generic (PLEG): container finished" podID="78114475-f256-441e-bab8-b37ee48c937c" containerID="e6c32afee6c95235234ebc27aeba6538794a2c26897e18df4ae434341472026c" exitCode=0 Jan 23 07:14:44 crc kubenswrapper[5102]: I0123 07:14:44.525210 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7-config-ddcjk" event={"ID":"78114475-f256-441e-bab8-b37ee48c937c","Type":"ContainerDied","Data":"e6c32afee6c95235234ebc27aeba6538794a2c26897e18df4ae434341472026c"} Jan 23 07:14:45 crc kubenswrapper[5102]: I0123 07:14:45.536923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13"} Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.404870 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ovn-controller-rkvv7" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.542778 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-n72gd" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.554111 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93"} Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.554161 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5"} Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.585757 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-n72gd" event={"ID":"abe15b3d-f1e3-41a6-bfe7-b52b87667411","Type":"ContainerDied","Data":"bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686"} Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.585803 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bd04810ef5c30e908f968baffc7298d2ff430616469cb2adef00d07a68a9c686" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.585859 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-n72gd" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.620654 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.693982 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694072 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rpmrr\" (UniqueName: \"kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694106 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694106 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "var-run-ovn". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694163 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bg88j\" (UniqueName: \"kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j\") pod \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694187 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts\") pod \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\" (UID: \"abe15b3d-f1e3-41a6-bfe7-b52b87667411\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694217 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694255 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694366 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run\") pod \"78114475-f256-441e-bab8-b37ee48c937c\" (UID: \"78114475-f256-441e-bab8-b37ee48c937c\") " Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694711 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run" (OuterVolumeSpecName: "var-run") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694774 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694835 5102 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.694848 5102 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-run-ovn\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.695242 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "abe15b3d-f1e3-41a6-bfe7-b52b87667411" (UID: "abe15b3d-f1e3-41a6-bfe7-b52b87667411"). 
InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.695272 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts" (OuterVolumeSpecName: "additional-scripts") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "additional-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.695427 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts" (OuterVolumeSpecName: "scripts") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.699411 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j" (OuterVolumeSpecName: "kube-api-access-bg88j") pod "abe15b3d-f1e3-41a6-bfe7-b52b87667411" (UID: "abe15b3d-f1e3-41a6-bfe7-b52b87667411"). InnerVolumeSpecName "kube-api-access-bg88j". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.700063 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr" (OuterVolumeSpecName: "kube-api-access-rpmrr") pod "78114475-f256-441e-bab8-b37ee48c937c" (UID: "78114475-f256-441e-bab8-b37ee48c937c"). InnerVolumeSpecName "kube-api-access-rpmrr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.769319 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.769394 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796883 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rpmrr\" (UniqueName: \"kubernetes.io/projected/78114475-f256-441e-bab8-b37ee48c937c-kube-api-access-rpmrr\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796928 5102 reconciler_common.go:293] "Volume detached for volume \"additional-scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-additional-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796939 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bg88j\" (UniqueName: \"kubernetes.io/projected/abe15b3d-f1e3-41a6-bfe7-b52b87667411-kube-api-access-bg88j\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796964 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/abe15b3d-f1e3-41a6-bfe7-b52b87667411-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796975 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/78114475-f256-441e-bab8-b37ee48c937c-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:46 crc kubenswrapper[5102]: I0123 07:14:46.796984 5102 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/78114475-f256-441e-bab8-b37ee48c937c-var-log-ovn\") on node \"crc\" DevicePath \"\"" Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.605002 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-controller-rkvv7-config-ddcjk" Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.613716 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe"} Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.613780 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7-config-ddcjk" event={"ID":"78114475-f256-441e-bab8-b37ee48c937c","Type":"ContainerDied","Data":"0f4272575de5d477a931f1032fa40d84832f71d02ff9c998ae491df0757bf05f"} Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.613809 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="0f4272575de5d477a931f1032fa40d84832f71d02ff9c998ae491df0757bf05f" Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.743603 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-rkvv7-config-ddcjk"] Jan 23 07:14:47 crc kubenswrapper[5102]: I0123 07:14:47.765978 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-rkvv7-config-ddcjk"] Jan 23 07:14:48 crc kubenswrapper[5102]: I0123 07:14:48.617038 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095"} Jan 23 07:14:49 crc kubenswrapper[5102]: I0123 07:14:49.618057 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78114475-f256-441e-bab8-b37ee48c937c" path="/var/lib/kubelet/pods/78114475-f256-441e-bab8-b37ee48c937c/volumes" Jan 23 07:14:49 crc kubenswrapper[5102]: I0123 07:14:49.629180 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59"} Jan 23 07:14:49 crc kubenswrapper[5102]: I0123 07:14:49.629242 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314"} Jan 23 07:14:49 crc kubenswrapper[5102]: I0123 07:14:49.629256 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb"} Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.656066 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332"} Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.657234 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012"} Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.657258 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4"} Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.657271 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8"} Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.916141 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-n72gd"] Jan 23 07:14:51 crc kubenswrapper[5102]: I0123 07:14:51.927217 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-n72gd"] Jan 23 07:14:52 crc kubenswrapper[5102]: I0123 07:14:52.673812 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e"} Jan 23 07:14:52 crc kubenswrapper[5102]: I0123 07:14:52.675765 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8"} Jan 23 07:14:52 crc kubenswrapper[5102]: I0123 07:14:52.675831 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerStarted","Data":"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68"} Jan 23 07:14:52 crc kubenswrapper[5102]: I0123 07:14:52.766249 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-storage-0" podStartSLOduration=40.704139567 podStartE2EDuration="47.766216463s" podCreationTimestamp="2026-01-23 07:14:05 +0000 UTC" firstStartedPulling="2026-01-23 07:14:43.526172203 +0000 UTC m=+1234.346521178" lastFinishedPulling="2026-01-23 07:14:50.588249089 +0000 UTC m=+1241.408598074" observedRunningTime="2026-01-23 07:14:52.751493234 +0000 UTC m=+1243.571842219" watchObservedRunningTime="2026-01-23 07:14:52.766216463 +0000 UTC m=+1243.586565438" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.107523 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:14:53 crc kubenswrapper[5102]: E0123 07:14:53.108385 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="33b32693-d02a-42ef-b749-3e0b883b3227" containerName="swift-ring-rebalance" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108447 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="33b32693-d02a-42ef-b749-3e0b883b3227" containerName="swift-ring-rebalance" Jan 23 07:14:53 crc kubenswrapper[5102]: E0123 07:14:53.108510 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78114475-f256-441e-bab8-b37ee48c937c" containerName="ovn-config" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108569 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="78114475-f256-441e-bab8-b37ee48c937c" containerName="ovn-config" Jan 23 07:14:53 crc kubenswrapper[5102]: E0123 07:14:53.108610 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="abe15b3d-f1e3-41a6-bfe7-b52b87667411" containerName="mariadb-account-create-update" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108629 5102 
state_mem.go:107] "Deleted CPUSet assignment" podUID="abe15b3d-f1e3-41a6-bfe7-b52b87667411" containerName="mariadb-account-create-update" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108918 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="33b32693-d02a-42ef-b749-3e0b883b3227" containerName="swift-ring-rebalance" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108949 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="78114475-f256-441e-bab8-b37ee48c937c" containerName="ovn-config" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.108972 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="abe15b3d-f1e3-41a6-bfe7-b52b87667411" containerName="mariadb-account-create-update" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.110588 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.114503 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"dns-swift-storage-0" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.140408 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.234803 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.234881 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.234931 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58nnb\" (UniqueName: \"kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.235007 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.235033 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.235050 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: 
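Before admitting the new dnsmasq pod, the CPU and memory managers purge per-container state left behind by the finished pods (swift-ring-rebalance, ovn-config, mariadb-account-create-update); RemoveStaleState logs at error level, but this is routine bookkeeping rather than a fault. A simplified stand-in for that pass (the data structures below are illustrative, not kubelet's real state types):

    // stale_state_sketch.go - illustrative RemoveStaleState pass.
    package main

    import "fmt"

    func main() {
        // podUID/container -> CPU set assignment (simplified to a string).
        assignments := map[string]string{
            "33b32693/swift-ring-rebalance":          "0-3",
            "78114475/ovn-config":                    "0-3",
            "abe15b3d/mariadb-account-create-update": "0-3",
        }
        active := map[string]bool{} // none of the above pods is still active

        for key := range assignments {
            if !active[key] {
                fmt.Printf("RemoveStaleState: removing container %s\n", key)
                delete(assignments, key) // deleting the current key mid-range is safe in Go
                fmt.Printf("Deleted CPUSet assignment %s\n", key)
            }
        }
    }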
\"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.337669 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.337797 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.337869 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58nnb\" (UniqueName: \"kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.337968 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.338010 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.338043 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.338907 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.339231 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.339439 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb\") pod 
\"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.340514 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.342160 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.364228 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58nnb\" (UniqueName: \"kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb\") pod \"dnsmasq-dns-8db84466c-tqtcx\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.445613 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:53 crc kubenswrapper[5102]: I0123 07:14:53.635768 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="abe15b3d-f1e3-41a6-bfe7-b52b87667411" path="/var/lib/kubelet/pods/abe15b3d-f1e3-41a6-bfe7-b52b87667411/volumes" Jan 23 07:14:54 crc kubenswrapper[5102]: I0123 07:14:54.027400 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:14:54 crc kubenswrapper[5102]: W0123 07:14:54.028890 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1de6d484_6cfc_4529_a911_9ee8058ae867.slice/crio-916d0db5896dacb2f6c9e0808503a29787b7a335b1e9f04f871b9d66b148eff0 WatchSource:0}: Error finding container 916d0db5896dacb2f6c9e0808503a29787b7a335b1e9f04f871b9d66b148eff0: Status 404 returned error can't find the container with id 916d0db5896dacb2f6c9e0808503a29787b7a335b1e9f04f871b9d66b148eff0 Jan 23 07:14:54 crc kubenswrapper[5102]: I0123 07:14:54.701621 5102 generic.go:334] "Generic (PLEG): container finished" podID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerID="741cc1b445f0292f2345423583736ecbf8ebe8026251380cadfbaf2d7229e678" exitCode=0 Jan 23 07:14:54 crc kubenswrapper[5102]: I0123 07:14:54.701727 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" event={"ID":"1de6d484-6cfc-4529-a911-9ee8058ae867","Type":"ContainerDied","Data":"741cc1b445f0292f2345423583736ecbf8ebe8026251380cadfbaf2d7229e678"} Jan 23 07:14:54 crc kubenswrapper[5102]: I0123 07:14:54.701785 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" event={"ID":"1de6d484-6cfc-4529-a911-9ee8058ae867","Type":"ContainerStarted","Data":"916d0db5896dacb2f6c9e0808503a29787b7a335b1e9f04f871b9d66b148eff0"} Jan 23 07:14:55 crc kubenswrapper[5102]: I0123 07:14:55.726932 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" 
event={"ID":"1de6d484-6cfc-4529-a911-9ee8058ae867","Type":"ContainerStarted","Data":"784e454f669a640e0d691a762f7f2be1680040e25c08425b68c7dfbfbec03c66"} Jan 23 07:14:55 crc kubenswrapper[5102]: I0123 07:14:55.727453 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:14:55 crc kubenswrapper[5102]: I0123 07:14:55.768058 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" podStartSLOduration=2.768024363 podStartE2EDuration="2.768024363s" podCreationTimestamp="2026-01-23 07:14:53 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:14:55.757500966 +0000 UTC m=+1246.577850001" watchObservedRunningTime="2026-01-23 07:14:55.768024363 +0000 UTC m=+1246.588373388" Jan 23 07:14:56 crc kubenswrapper[5102]: I0123 07:14:56.927361 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-kmst8"] Jan 23 07:14:56 crc kubenswrapper[5102]: I0123 07:14:56.928950 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:56 crc kubenswrapper[5102]: I0123 07:14:56.933080 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 23 07:14:56 crc kubenswrapper[5102]: I0123 07:14:56.938921 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kmst8"] Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.012724 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9tj45\" (UniqueName: \"kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45\") pod \"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.012824 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts\") pod \"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.114113 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts\") pod \"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.114231 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9tj45\" (UniqueName: \"kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45\") pod \"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.116386 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts\") pod 
\"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.137673 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9tj45\" (UniqueName: \"kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45\") pod \"root-account-create-update-kmst8\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.255858 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kmst8" Jan 23 07:14:57 crc kubenswrapper[5102]: I0123 07:14:57.760362 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-kmst8"] Jan 23 07:14:57 crc kubenswrapper[5102]: W0123 07:14:57.772885 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod87ad8d4b_f678_4d24_add7_4af5cb947162.slice/crio-54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846 WatchSource:0}: Error finding container 54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846: Status 404 returned error can't find the container with id 54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846 Jan 23 07:14:58 crc kubenswrapper[5102]: I0123 07:14:58.771028 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gp42d" event={"ID":"d17cb94c-536a-4a89-aac5-802cc52ae2ce","Type":"ContainerStarted","Data":"4d42dba66296dcae374056c93eefa27df0374a17bdd663a51a98a8779a3b1ae0"} Jan 23 07:14:58 crc kubenswrapper[5102]: I0123 07:14:58.772271 5102 generic.go:334] "Generic (PLEG): container finished" podID="87ad8d4b-f678-4d24-add7-4af5cb947162" containerID="17ec3f7e6cb9c3d8a6294e7054c5af457a6f4c6a3ebb59c1056f1d6e8a6ba5c5" exitCode=0 Jan 23 07:14:58 crc kubenswrapper[5102]: I0123 07:14:58.772327 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmst8" event={"ID":"87ad8d4b-f678-4d24-add7-4af5cb947162","Type":"ContainerDied","Data":"17ec3f7e6cb9c3d8a6294e7054c5af457a6f4c6a3ebb59c1056f1d6e8a6ba5c5"} Jan 23 07:14:58 crc kubenswrapper[5102]: I0123 07:14:58.772368 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmst8" event={"ID":"87ad8d4b-f678-4d24-add7-4af5cb947162","Type":"ContainerStarted","Data":"54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846"} Jan 23 07:14:58 crc kubenswrapper[5102]: I0123 07:14:58.840203 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-db-sync-gp42d" podStartSLOduration=2.396320351 podStartE2EDuration="34.840175121s" podCreationTimestamp="2026-01-23 07:14:24 +0000 UTC" firstStartedPulling="2026-01-23 07:14:25.071222728 +0000 UTC m=+1215.891571723" lastFinishedPulling="2026-01-23 07:14:57.515077518 +0000 UTC m=+1248.335426493" observedRunningTime="2026-01-23 07:14:58.801508988 +0000 UTC m=+1249.621857993" watchObservedRunningTime="2026-01-23 07:14:58.840175121 +0000 UTC m=+1249.660524136" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.149410 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.152183 5102 util.go:30] "No sandbox for pod can be 
found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.155242 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.155434 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.170701 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.173532 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.173649 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.173821 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ts669\" (UniqueName: \"kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.274608 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ts669\" (UniqueName: \"kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.274698 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.274726 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.275679 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume\") 
pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.283363 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.291584 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ts669\" (UniqueName: \"kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669\") pod \"collect-profiles-29485875-zs5hd\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.292784 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-server-0" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.364811 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-kmst8" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.475123 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.477181 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts\") pod \"87ad8d4b-f678-4d24-add7-4af5cb947162\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.477229 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9tj45\" (UniqueName: \"kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45\") pod \"87ad8d4b-f678-4d24-add7-4af5cb947162\" (UID: \"87ad8d4b-f678-4d24-add7-4af5cb947162\") " Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.478152 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "87ad8d4b-f678-4d24-add7-4af5cb947162" (UID: "87ad8d4b-f678-4d24-add7-4af5cb947162"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.499774 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45" (OuterVolumeSpecName: "kube-api-access-9tj45") pod "87ad8d4b-f678-4d24-add7-4af5cb947162" (UID: "87ad8d4b-f678-4d24-add7-4af5cb947162"). InnerVolumeSpecName "kube-api-access-9tj45". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.579691 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/87ad8d4b-f678-4d24-add7-4af5cb947162-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.580250 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9tj45\" (UniqueName: \"kubernetes.io/projected/87ad8d4b-f678-4d24-add7-4af5cb947162-kube-api-access-9tj45\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.601759 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.709911 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-create-6wzvt"] Jan 23 07:15:00 crc kubenswrapper[5102]: E0123 07:15:00.710248 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="87ad8d4b-f678-4d24-add7-4af5cb947162" containerName="mariadb-account-create-update" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.710264 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="87ad8d4b-f678-4d24-add7-4af5cb947162" containerName="mariadb-account-create-update" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.710429 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="87ad8d4b-f678-4d24-add7-4af5cb947162" containerName="mariadb-account-create-update" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.717804 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.735998 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f904-account-create-update-6s8dg"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.737090 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.745710 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.754654 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6wzvt"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.764800 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f904-account-create-update-6s8dg"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.823086 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-kmst8" event={"ID":"87ad8d4b-f678-4d24-add7-4af5cb947162","Type":"ContainerDied","Data":"54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846"} Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.823429 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="54b3031fd58f9cfa944e61dc8e76ed6fba83510c3870d853422debea3af8e846" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.823491 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/root-account-create-update-kmst8" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.847988 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-f350-account-create-update-576nm"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.849115 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.856727 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.886207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.886262 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pkpqg\" (UniqueName: \"kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.886331 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.886375 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jlpkn\" (UniqueName: \"kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.886480 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-create-vhjgv"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.888162 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.917809 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-vhjgv"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.930921 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f350-account-create-update-576nm"] Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.989672 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.989710 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.989930 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990164 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jlpkn\" (UniqueName: \"kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990358 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gtplb\" (UniqueName: \"kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990454 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990533 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-z9vjs\" (UniqueName: \"kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990620 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pkpqg\" (UniqueName: 
\"kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.990680 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:00 crc kubenswrapper[5102]: I0123 07:15:00.991666 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.009029 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pkpqg\" (UniqueName: \"kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg\") pod \"barbican-f904-account-create-update-6s8dg\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") " pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.010617 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jlpkn\" (UniqueName: \"kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn\") pod \"cinder-db-create-6wzvt\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") " pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.055987 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6wzvt" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.070497 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f904-account-create-update-6s8dg" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.092051 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gtplb\" (UniqueName: \"kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.092111 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-z9vjs\" (UniqueName: \"kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.092170 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.092188 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.092986 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.093777 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.101450 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-db-sync-k7zdm"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.103210 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.107867 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.108079 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.108200 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-f5hn2" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.108355 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.113988 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-create-z6f8v"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.115341 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.115941 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-z9vjs\" (UniqueName: \"kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs\") pod \"cinder-f350-account-create-update-576nm\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") " pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.120076 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gtplb\" (UniqueName: \"kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb\") pod \"barbican-db-create-vhjgv\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") " pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.137379 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-k7zdm"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.158522 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-z6f8v"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.166870 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c788-account-create-update-rwhzz"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.168128 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.172034 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.192206 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f350-account-create-update-576nm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.193793 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxbbv\" (UniqueName: \"kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.193886 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts\") pod \"neutron-db-create-z6f8v\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.193949 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.193976 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.194018 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lkcnm\" (UniqueName: \"kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm\") pod \"neutron-db-create-z6f8v\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.216616 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c788-account-create-update-rwhzz"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.281628 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.293202 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-create-vhjgv" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301578 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvd9w\" (UniqueName: \"kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301677 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lkcnm\" (UniqueName: \"kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm\") pod \"neutron-db-create-z6f8v\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301746 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxbbv\" (UniqueName: \"kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301874 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301932 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts\") pod \"neutron-db-create-z6f8v\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.301983 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.302012 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.316774 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.316815 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts\") pod \"neutron-db-create-z6f8v\" (UID: 
\"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.329485 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lkcnm\" (UniqueName: \"kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm\") pod \"neutron-db-create-z6f8v\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") " pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.329709 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.346981 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxbbv\" (UniqueName: \"kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv\") pod \"keystone-db-sync-k7zdm\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") " pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.406649 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvd9w\" (UniqueName: \"kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.406781 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.407564 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.436167 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-k7zdm" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.438485 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvd9w\" (UniqueName: \"kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w\") pod \"neutron-c788-account-create-update-rwhzz\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") " pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.441178 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-z6f8v" Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.478385 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-create-6wzvt"] Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.497670 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c788-account-create-update-rwhzz" Jan 23 07:15:01 crc kubenswrapper[5102]: W0123 07:15:01.505848 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod2dd81582_411c_483f_a6e0_08d3172ff873.slice/crio-5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb WatchSource:0}: Error finding container 5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb: Status 404 returned error can't find the container with id 5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb Jan 23 07:15:01 crc kubenswrapper[5102]: I0123 07:15:01.757049 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f904-account-create-update-6s8dg"] Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.840789 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" event={"ID":"d3333009-db65-4416-8fee-26e53bd734cc","Type":"ContainerStarted","Data":"9ec61b7cca495dfcdd0f7462fc5b68e697a6891ec49580d4bec120d4330cecc7"} Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.840832 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" event={"ID":"d3333009-db65-4416-8fee-26e53bd734cc","Type":"ContainerStarted","Data":"4a11e3fb68027b1aa709287df9db0d87cf831a9dfb429b10d2c09abd6eca6cf8"} Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.843493 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f904-account-create-update-6s8dg" event={"ID":"2ec7cc04-6c30-49ca-91c8-99bb4200af09","Type":"ContainerStarted","Data":"612ddf8a6f7c8f895e0b9b8fba3b9dc986edf5aa0172ed6a1fb8a1bbe3b98b7a"} Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.845525 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6wzvt" event={"ID":"2dd81582-411c-483f-a6e0-08d3172ff873","Type":"ContainerStarted","Data":"eaf789feee4610f2cd2ae2c77e24ab5397a84a378cbf76fa694ebd576b9d3c39"} Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.845577 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6wzvt" event={"ID":"2dd81582-411c-483f-a6e0-08d3172ff873","Type":"ContainerStarted","Data":"5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb"} Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.845590 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f350-account-create-update-576nm"] Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.861649 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" podStartSLOduration=1.861633823 podStartE2EDuration="1.861633823s" podCreationTimestamp="2026-01-23 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:01.857741583 +0000 UTC m=+1252.678090558" watchObservedRunningTime="2026-01-23 07:15:01.861633823 +0000 UTC m=+1252.681982798" Jan 23 07:15:02 crc kubenswrapper[5102]: W0123 07:15:01.873430 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0326d61e_cade_48ab_87e9_7010d5f95ea8.slice/crio-abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee WatchSource:0}: Error finding 
container abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee: Status 404 returned error can't find the container with id abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.878504 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-create-6wzvt" podStartSLOduration=1.878488728 podStartE2EDuration="1.878488728s" podCreationTimestamp="2026-01-23 07:15:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:01.876963771 +0000 UTC m=+1252.697312746" watchObservedRunningTime="2026-01-23 07:15:01.878488728 +0000 UTC m=+1252.698837703"
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:01.982427 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-create-vhjgv"]
Jan 23 07:15:02 crc kubenswrapper[5102]: W0123 07:15:02.004789 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod4d8c9762_bd4b_424c_943b_2b114c08211e.slice/crio-09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06 WatchSource:0}: Error finding container 09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06: Status 404 returned error can't find the container with id 09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.613461 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-db-sync-k7zdm"]
Jan 23 07:15:02 crc kubenswrapper[5102]: W0123 07:15:02.619408 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb5890481_2315_483e_868e_6145bffd53c3.slice/crio-6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89 WatchSource:0}: Error finding container 6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89: Status 404 returned error can't find the container with id 6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89
Jan 23 07:15:02 crc kubenswrapper[5102]: W0123 07:15:02.623015 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod88a152e3_a95a_4c00_a905_7b88c737c0fc.slice/crio-e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd WatchSource:0}: Error finding container e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd: Status 404 returned error can't find the container with id e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.646975 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-create-z6f8v"]
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.831145 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c788-account-create-update-rwhzz"]
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.858006 5102 generic.go:334] "Generic (PLEG): container finished" podID="d3333009-db65-4416-8fee-26e53bd734cc" containerID="9ec61b7cca495dfcdd0f7462fc5b68e697a6891ec49580d4bec120d4330cecc7" exitCode=0
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.858089 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" event={"ID":"d3333009-db65-4416-8fee-26e53bd734cc","Type":"ContainerDied","Data":"9ec61b7cca495dfcdd0f7462fc5b68e697a6891ec49580d4bec120d4330cecc7"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.865261 5102 generic.go:334] "Generic (PLEG): container finished" podID="2ec7cc04-6c30-49ca-91c8-99bb4200af09" containerID="53a6ede6ca48bea0ecdc5fb2b5b3c71102206283a4315a7b0bf011fec4695cfb" exitCode=0
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.865335 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f904-account-create-update-6s8dg" event={"ID":"2ec7cc04-6c30-49ca-91c8-99bb4200af09","Type":"ContainerDied","Data":"53a6ede6ca48bea0ecdc5fb2b5b3c71102206283a4315a7b0bf011fec4695cfb"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.877399 5102 generic.go:334] "Generic (PLEG): container finished" podID="2dd81582-411c-483f-a6e0-08d3172ff873" containerID="eaf789feee4610f2cd2ae2c77e24ab5397a84a378cbf76fa694ebd576b9d3c39" exitCode=0
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.877473 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6wzvt" event={"ID":"2dd81582-411c-483f-a6e0-08d3172ff873","Type":"ContainerDied","Data":"eaf789feee4610f2cd2ae2c77e24ab5397a84a378cbf76fa694ebd576b9d3c39"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.880247 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-k7zdm" event={"ID":"b5890481-2315-483e-868e-6145bffd53c3","Type":"ContainerStarted","Data":"6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89"}
Jan 23 07:15:02 crc kubenswrapper[5102]: W0123 07:15:02.880376 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddc3a1422_92f2_45be_9e26_4768b42d9505.slice/crio-66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af WatchSource:0}: Error finding container 66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af: Status 404 returned error can't find the container with id 66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.886670 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-z6f8v" event={"ID":"88a152e3-a95a-4c00-a905-7b88c737c0fc","Type":"ContainerStarted","Data":"e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.891210 5102 generic.go:334] "Generic (PLEG): container finished" podID="0326d61e-cade-48ab-87e9-7010d5f95ea8" containerID="6e2bf46f7c946934a80177b09a7d6e8d66e8926c7025dcda6c07822a13e75707" exitCode=0
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.891304 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f350-account-create-update-576nm" event={"ID":"0326d61e-cade-48ab-87e9-7010d5f95ea8","Type":"ContainerDied","Data":"6e2bf46f7c946934a80177b09a7d6e8d66e8926c7025dcda6c07822a13e75707"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.891329 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f350-account-create-update-576nm" event={"ID":"0326d61e-cade-48ab-87e9-7010d5f95ea8","Type":"ContainerStarted","Data":"abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.904371 5102 generic.go:334] "Generic (PLEG): container finished" podID="4d8c9762-bd4b-424c-943b-2b114c08211e" containerID="c6b2bc5b237e5309ec098641c4039d7dd50f686b87ac28ddae325a217c728b3f" exitCode=0
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.904430 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vhjgv" event={"ID":"4d8c9762-bd4b-424c-943b-2b114c08211e","Type":"ContainerDied","Data":"c6b2bc5b237e5309ec098641c4039d7dd50f686b87ac28ddae325a217c728b3f"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.904465 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vhjgv" event={"ID":"4d8c9762-bd4b-424c-943b-2b114c08211e","Type":"ContainerStarted","Data":"09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06"}
Jan 23 07:15:02 crc kubenswrapper[5102]: I0123 07:15:02.934060 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-create-z6f8v" podStartSLOduration=1.933991623 podStartE2EDuration="1.933991623s" podCreationTimestamp="2026-01-23 07:15:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:02.92329961 +0000 UTC m=+1253.743648585" watchObservedRunningTime="2026-01-23 07:15:02.933991623 +0000 UTC m=+1253.754340598"
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.449696 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-8db84466c-tqtcx"
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.510049 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"]
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.510529 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="dnsmasq-dns" containerID="cri-o://1ceec8956499865fff96f6ae64bb6e4f03781e2ae5d8da6021390287677e14c9" gracePeriod=10
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.939982 5102 generic.go:334] "Generic (PLEG): container finished" podID="dc3a1422-92f2-45be-9e26-4768b42d9505" containerID="decc8d23997068046d25aae949afcb423ad378742100603636a84a6e4ce1c056" exitCode=0
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.940087 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c788-account-create-update-rwhzz" event={"ID":"dc3a1422-92f2-45be-9e26-4768b42d9505","Type":"ContainerDied","Data":"decc8d23997068046d25aae949afcb423ad378742100603636a84a6e4ce1c056"}
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.940160 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c788-account-create-update-rwhzz" event={"ID":"dc3a1422-92f2-45be-9e26-4768b42d9505","Type":"ContainerStarted","Data":"66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af"}
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.943372 5102 generic.go:334] "Generic (PLEG): container finished" podID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerID="1ceec8956499865fff96f6ae64bb6e4f03781e2ae5d8da6021390287677e14c9" exitCode=0
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.943468 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" event={"ID":"0f92ba08-c435-49a6-96cc-dd18ef33f14a","Type":"ContainerDied","Data":"1ceec8956499865fff96f6ae64bb6e4f03781e2ae5d8da6021390287677e14c9"}
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.945887 5102 generic.go:334] "Generic (PLEG): container finished" podID="88a152e3-a95a-4c00-a905-7b88c737c0fc" containerID="0910323f4efad34b947cd9ea3328e56c11965b6cf6e98f6586c7a0af4f002f66" exitCode=0
Jan 23 07:15:03 crc kubenswrapper[5102]: I0123 07:15:03.946015 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-z6f8v" event={"ID":"88a152e3-a95a-4c00-a905-7b88c737c0fc","Type":"ContainerDied","Data":"0910323f4efad34b947cd9ea3328e56c11965b6cf6e98f6586c7a0af4f002f66"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.048921 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.173313 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc\") pod \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.173945 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cgrjd\" (UniqueName: \"kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd\") pod \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.173992 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config\") pod \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.174143 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb\") pod \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.175063 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb\") pod \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\" (UID: \"0f92ba08-c435-49a6-96cc-dd18ef33f14a\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.186826 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd" (OuterVolumeSpecName: "kube-api-access-cgrjd") pod "0f92ba08-c435-49a6-96cc-dd18ef33f14a" (UID: "0f92ba08-c435-49a6-96cc-dd18ef33f14a"). InnerVolumeSpecName "kube-api-access-cgrjd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.254709 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0f92ba08-c435-49a6-96cc-dd18ef33f14a" (UID: "0f92ba08-c435-49a6-96cc-dd18ef33f14a"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.279262 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.279295 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cgrjd\" (UniqueName: \"kubernetes.io/projected/0f92ba08-c435-49a6-96cc-dd18ef33f14a-kube-api-access-cgrjd\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.280840 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0f92ba08-c435-49a6-96cc-dd18ef33f14a" (UID: "0f92ba08-c435-49a6-96cc-dd18ef33f14a"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.281778 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0f92ba08-c435-49a6-96cc-dd18ef33f14a" (UID: "0f92ba08-c435-49a6-96cc-dd18ef33f14a"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.298091 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config" (OuterVolumeSpecName: "config") pod "0f92ba08-c435-49a6-96cc-dd18ef33f14a" (UID: "0f92ba08-c435-49a6-96cc-dd18ef33f14a"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.381902 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.381961 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-dns-svc\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.381978 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0f92ba08-c435-49a6-96cc-dd18ef33f14a-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.407517 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6wzvt"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.558010 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.568202 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-vhjgv"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.578502 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-576nm"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.584286 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts\") pod \"2dd81582-411c-483f-a6e0-08d3172ff873\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.584461 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jlpkn\" (UniqueName: \"kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn\") pod \"2dd81582-411c-483f-a6e0-08d3172ff873\" (UID: \"2dd81582-411c-483f-a6e0-08d3172ff873\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.584707 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2dd81582-411c-483f-a6e0-08d3172ff873" (UID: "2dd81582-411c-483f-a6e0-08d3172ff873"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.585694 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2dd81582-411c-483f-a6e0-08d3172ff873-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.591564 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn" (OuterVolumeSpecName: "kube-api-access-jlpkn") pod "2dd81582-411c-483f-a6e0-08d3172ff873" (UID: "2dd81582-411c-483f-a6e0-08d3172ff873"). InnerVolumeSpecName "kube-api-access-jlpkn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.598252 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f904-account-create-update-6s8dg"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.686962 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts\") pod \"4d8c9762-bd4b-424c-943b-2b114c08211e\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687046 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ts669\" (UniqueName: \"kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669\") pod \"d3333009-db65-4416-8fee-26e53bd734cc\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687078 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume\") pod \"d3333009-db65-4416-8fee-26e53bd734cc\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687117 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gtplb\" (UniqueName: \"kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb\") pod \"4d8c9762-bd4b-424c-943b-2b114c08211e\" (UID: \"4d8c9762-bd4b-424c-943b-2b114c08211e\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687139 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts\") pod \"0326d61e-cade-48ab-87e9-7010d5f95ea8\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687486 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-z9vjs\" (UniqueName: \"kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs\") pod \"0326d61e-cade-48ab-87e9-7010d5f95ea8\" (UID: \"0326d61e-cade-48ab-87e9-7010d5f95ea8\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687611 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4d8c9762-bd4b-424c-943b-2b114c08211e" (UID: "4d8c9762-bd4b-424c-943b-2b114c08211e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687624 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0326d61e-cade-48ab-87e9-7010d5f95ea8" (UID: "0326d61e-cade-48ab-87e9-7010d5f95ea8"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.687803 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume\") pod \"d3333009-db65-4416-8fee-26e53bd734cc\" (UID: \"d3333009-db65-4416-8fee-26e53bd734cc\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.688365 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4d8c9762-bd4b-424c-943b-2b114c08211e-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.688378 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0326d61e-cade-48ab-87e9-7010d5f95ea8-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.688388 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jlpkn\" (UniqueName: \"kubernetes.io/projected/2dd81582-411c-483f-a6e0-08d3172ff873-kube-api-access-jlpkn\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.689411 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume" (OuterVolumeSpecName: "config-volume") pod "d3333009-db65-4416-8fee-26e53bd734cc" (UID: "d3333009-db65-4416-8fee-26e53bd734cc"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.691028 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669" (OuterVolumeSpecName: "kube-api-access-ts669") pod "d3333009-db65-4416-8fee-26e53bd734cc" (UID: "d3333009-db65-4416-8fee-26e53bd734cc"). InnerVolumeSpecName "kube-api-access-ts669". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.691767 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d3333009-db65-4416-8fee-26e53bd734cc" (UID: "d3333009-db65-4416-8fee-26e53bd734cc"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.691915 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb" (OuterVolumeSpecName: "kube-api-access-gtplb") pod "4d8c9762-bd4b-424c-943b-2b114c08211e" (UID: "4d8c9762-bd4b-424c-943b-2b114c08211e"). InnerVolumeSpecName "kube-api-access-gtplb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.692639 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs" (OuterVolumeSpecName: "kube-api-access-z9vjs") pod "0326d61e-cade-48ab-87e9-7010d5f95ea8" (UID: "0326d61e-cade-48ab-87e9-7010d5f95ea8"). InnerVolumeSpecName "kube-api-access-z9vjs". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.789273 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pkpqg\" (UniqueName: \"kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg\") pod \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.789338 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts\") pod \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\" (UID: \"2ec7cc04-6c30-49ca-91c8-99bb4200af09\") "
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790036 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ts669\" (UniqueName: \"kubernetes.io/projected/d3333009-db65-4416-8fee-26e53bd734cc-kube-api-access-ts669\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790062 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d3333009-db65-4416-8fee-26e53bd734cc-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790076 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gtplb\" (UniqueName: \"kubernetes.io/projected/4d8c9762-bd4b-424c-943b-2b114c08211e-kube-api-access-gtplb\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790089 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-z9vjs\" (UniqueName: \"kubernetes.io/projected/0326d61e-cade-48ab-87e9-7010d5f95ea8-kube-api-access-z9vjs\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790101 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d3333009-db65-4416-8fee-26e53bd734cc-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.790320 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "2ec7cc04-6c30-49ca-91c8-99bb4200af09" (UID: "2ec7cc04-6c30-49ca-91c8-99bb4200af09"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.793009 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg" (OuterVolumeSpecName: "kube-api-access-pkpqg") pod "2ec7cc04-6c30-49ca-91c8-99bb4200af09" (UID: "2ec7cc04-6c30-49ca-91c8-99bb4200af09"). InnerVolumeSpecName "kube-api-access-pkpqg". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.891826 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pkpqg\" (UniqueName: \"kubernetes.io/projected/2ec7cc04-6c30-49ca-91c8-99bb4200af09-kube-api-access-pkpqg\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.891873 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/2ec7cc04-6c30-49ca-91c8-99bb4200af09-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.955617 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-576nm"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.955640 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f350-account-create-update-576nm" event={"ID":"0326d61e-cade-48ab-87e9-7010d5f95ea8","Type":"ContainerDied","Data":"abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.955680 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="abf9c7f83a01e5550b6469f6fab5fd9ac6c291976d8982180caac77237ae85ee"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.957507 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-create-vhjgv" event={"ID":"4d8c9762-bd4b-424c-943b-2b114c08211e","Type":"ContainerDied","Data":"09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.957565 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-create-vhjgv"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.957580 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="09eaef42973c12d7016c637665b6eb190d38f58050f5cfb03be31e1d2ce43a06"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.961435 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.961429 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd" event={"ID":"d3333009-db65-4416-8fee-26e53bd734cc","Type":"ContainerDied","Data":"4a11e3fb68027b1aa709287df9db0d87cf831a9dfb429b10d2c09abd6eca6cf8"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.961491 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a11e3fb68027b1aa709287df9db0d87cf831a9dfb429b10d2c09abd6eca6cf8"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.962770 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f904-account-create-update-6s8dg" event={"ID":"2ec7cc04-6c30-49ca-91c8-99bb4200af09","Type":"ContainerDied","Data":"612ddf8a6f7c8f895e0b9b8fba3b9dc986edf5aa0172ed6a1fb8a1bbe3b98b7a"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.962803 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="612ddf8a6f7c8f895e0b9b8fba3b9dc986edf5aa0172ed6a1fb8a1bbe3b98b7a"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.962855 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f904-account-create-update-6s8dg"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.966041 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-create-6wzvt" event={"ID":"2dd81582-411c-483f-a6e0-08d3172ff873","Type":"ContainerDied","Data":"5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.966085 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5fcf2a5df234126aa6f75b813fc375db00e859cd9d60fbc2074280b42f7192bb"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.966099 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-create-6wzvt"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.968091 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj"
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.968093 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-67fdf7998c-9fgnj" event={"ID":"0f92ba08-c435-49a6-96cc-dd18ef33f14a","Type":"ContainerDied","Data":"f3ecf2b23cf99280baff1a8af2c780709fdcfe3a185d73f2fb3f0b09fb1c4ab7"}
Jan 23 07:15:04 crc kubenswrapper[5102]: I0123 07:15:04.968269 5102 scope.go:117] "RemoveContainer" containerID="1ceec8956499865fff96f6ae64bb6e4f03781e2ae5d8da6021390287677e14c9"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.021510 5102 scope.go:117] "RemoveContainer" containerID="050b1a1ec1e060b981c1246b97bab3a71cb8dd765325a2b812e485720f3c1d94"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.061569 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"]
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.072648 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-67fdf7998c-9fgnj"]
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.328393 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-rwhzz"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.406947 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts\") pod \"dc3a1422-92f2-45be-9e26-4768b42d9505\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") "
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.407120 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hvd9w\" (UniqueName: \"kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w\") pod \"dc3a1422-92f2-45be-9e26-4768b42d9505\" (UID: \"dc3a1422-92f2-45be-9e26-4768b42d9505\") "
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.409256 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "dc3a1422-92f2-45be-9e26-4768b42d9505" (UID: "dc3a1422-92f2-45be-9e26-4768b42d9505"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.435812 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w" (OuterVolumeSpecName: "kube-api-access-hvd9w") pod "dc3a1422-92f2-45be-9e26-4768b42d9505" (UID: "dc3a1422-92f2-45be-9e26-4768b42d9505"). InnerVolumeSpecName "kube-api-access-hvd9w". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.453855 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-z6f8v"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.513901 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lkcnm\" (UniqueName: \"kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm\") pod \"88a152e3-a95a-4c00-a905-7b88c737c0fc\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") "
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.514005 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts\") pod \"88a152e3-a95a-4c00-a905-7b88c737c0fc\" (UID: \"88a152e3-a95a-4c00-a905-7b88c737c0fc\") "
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.514264 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/dc3a1422-92f2-45be-9e26-4768b42d9505-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.514280 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hvd9w\" (UniqueName: \"kubernetes.io/projected/dc3a1422-92f2-45be-9e26-4768b42d9505-kube-api-access-hvd9w\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.514693 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "88a152e3-a95a-4c00-a905-7b88c737c0fc" (UID: "88a152e3-a95a-4c00-a905-7b88c737c0fc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.521709 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm" (OuterVolumeSpecName: "kube-api-access-lkcnm") pod "88a152e3-a95a-4c00-a905-7b88c737c0fc" (UID: "88a152e3-a95a-4c00-a905-7b88c737c0fc"). InnerVolumeSpecName "kube-api-access-lkcnm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.607627 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" path="/var/lib/kubelet/pods/0f92ba08-c435-49a6-96cc-dd18ef33f14a/volumes"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.616292 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/88a152e3-a95a-4c00-a905-7b88c737c0fc-operator-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.616320 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lkcnm\" (UniqueName: \"kubernetes.io/projected/88a152e3-a95a-4c00-a905-7b88c737c0fc-kube-api-access-lkcnm\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.982987 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c788-account-create-update-rwhzz" event={"ID":"dc3a1422-92f2-45be-9e26-4768b42d9505","Type":"ContainerDied","Data":"66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af"}
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.983045 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="66fc34e6683ebb76d4c790d580db9c0d6b3e858bd8dc6a4784f96675917de8af"
Jan 23 07:15:05 crc kubenswrapper[5102]: I0123 07:15:05.983170 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-rwhzz"
Jan 23 07:15:06 crc kubenswrapper[5102]: I0123 07:15:05.999517 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-create-z6f8v" event={"ID":"88a152e3-a95a-4c00-a905-7b88c737c0fc","Type":"ContainerDied","Data":"e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd"}
Jan 23 07:15:06 crc kubenswrapper[5102]: I0123 07:15:05.999567 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e3ddbfad18a9bae53afbf558d8c621b2340a207ecfda62c19a60845226cac8fd"
Jan 23 07:15:06 crc kubenswrapper[5102]: I0123 07:15:05.999628 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-create-z6f8v"
Jan 23 07:15:07 crc kubenswrapper[5102]: I0123 07:15:07.012570 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gp42d" event={"ID":"d17cb94c-536a-4a89-aac5-802cc52ae2ce","Type":"ContainerDied","Data":"4d42dba66296dcae374056c93eefa27df0374a17bdd663a51a98a8779a3b1ae0"}
Jan 23 07:15:07 crc kubenswrapper[5102]: I0123 07:15:07.012606 5102 generic.go:334] "Generic (PLEG): container finished" podID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" containerID="4d42dba66296dcae374056c93eefa27df0374a17bdd663a51a98a8779a3b1ae0" exitCode=0
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.738480 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gp42d"
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.895851 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data\") pod \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") "
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.896328 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle\") pod \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") "
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.896402 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pfqjn\" (UniqueName: \"kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn\") pod \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") "
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.896470 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data\") pod \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\" (UID: \"d17cb94c-536a-4a89-aac5-802cc52ae2ce\") "
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.918296 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn" (OuterVolumeSpecName: "kube-api-access-pfqjn") pod "d17cb94c-536a-4a89-aac5-802cc52ae2ce" (UID: "d17cb94c-536a-4a89-aac5-802cc52ae2ce"). InnerVolumeSpecName "kube-api-access-pfqjn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.924037 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "d17cb94c-536a-4a89-aac5-802cc52ae2ce" (UID: "d17cb94c-536a-4a89-aac5-802cc52ae2ce"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.929685 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d17cb94c-536a-4a89-aac5-802cc52ae2ce" (UID: "d17cb94c-536a-4a89-aac5-802cc52ae2ce"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:08 crc kubenswrapper[5102]: I0123 07:15:08.988299 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data" (OuterVolumeSpecName: "config-data") pod "d17cb94c-536a-4a89-aac5-802cc52ae2ce" (UID: "d17cb94c-536a-4a89-aac5-802cc52ae2ce"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:08.999985 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.000066 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pfqjn\" (UniqueName: \"kubernetes.io/projected/d17cb94c-536a-4a89-aac5-802cc52ae2ce-kube-api-access-pfqjn\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.000085 5102 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.000097 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d17cb94c-536a-4a89-aac5-802cc52ae2ce-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.038574 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-k7zdm" event={"ID":"b5890481-2315-483e-868e-6145bffd53c3","Type":"ContainerStarted","Data":"a2c0fa2bd612a343ce6b3fcfef0d06e46c76af03843c6c3a225b21e106594666"}
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.040053 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-db-sync-gp42d" event={"ID":"d17cb94c-536a-4a89-aac5-802cc52ae2ce","Type":"ContainerDied","Data":"fe3d6a54905aecf9cb4bd29261751e97537bac39fdc42198757d2bdb3116e65c"}
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.040102 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe3d6a54905aecf9cb4bd29261751e97537bac39fdc42198757d2bdb3116e65c"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.040180 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-db-sync-gp42d"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.073026 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-db-sync-k7zdm" podStartSLOduration=2.112487897 podStartE2EDuration="8.072996786s" podCreationTimestamp="2026-01-23 07:15:01 +0000 UTC" firstStartedPulling="2026-01-23 07:15:02.621729376 +0000 UTC m=+1253.442078341" lastFinishedPulling="2026-01-23 07:15:08.582238265 +0000 UTC m=+1259.402587230" observedRunningTime="2026-01-23 07:15:09.066416082 +0000 UTC m=+1259.886765067" watchObservedRunningTime="2026-01-23 07:15:09.072996786 +0000 UTC m=+1259.893345761"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.545881 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"]
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547098 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="dnsmasq-dns"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547125 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="dnsmasq-dns"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547154 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d3333009-db65-4416-8fee-26e53bd734cc" containerName="collect-profiles"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547164 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d3333009-db65-4416-8fee-26e53bd734cc" containerName="collect-profiles"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547193 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="init"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547201 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="init"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547217 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2dd81582-411c-483f-a6e0-08d3172ff873" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547225 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2dd81582-411c-483f-a6e0-08d3172ff873" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547239 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0326d61e-cade-48ab-87e9-7010d5f95ea8" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547250 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0326d61e-cade-48ab-87e9-7010d5f95ea8" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547262 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4d8c9762-bd4b-424c-943b-2b114c08211e" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547269 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4d8c9762-bd4b-424c-943b-2b114c08211e" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547277 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" containerName="glance-db-sync"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547284 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" containerName="glance-db-sync"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547295 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="88a152e3-a95a-4c00-a905-7b88c737c0fc" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547301 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="88a152e3-a95a-4c00-a905-7b88c737c0fc" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547323 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec7cc04-6c30-49ca-91c8-99bb4200af09" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547329 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec7cc04-6c30-49ca-91c8-99bb4200af09" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: E0123 07:15:09.547342 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dc3a1422-92f2-45be-9e26-4768b42d9505" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547348 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dc3a1422-92f2-45be-9e26-4768b42d9505" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547515 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d3333009-db65-4416-8fee-26e53bd734cc" containerName="collect-profiles"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547528 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" containerName="glance-db-sync"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547566 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="88a152e3-a95a-4c00-a905-7b88c737c0fc" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547575 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4d8c9762-bd4b-424c-943b-2b114c08211e" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547584 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="dc3a1422-92f2-45be-9e26-4768b42d9505" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547595 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ec7cc04-6c30-49ca-91c8-99bb4200af09" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547604 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f92ba08-c435-49a6-96cc-dd18ef33f14a" containerName="dnsmasq-dns"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547613 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0326d61e-cade-48ab-87e9-7010d5f95ea8" containerName="mariadb-account-create-update"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.547620 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2dd81582-411c-483f-a6e0-08d3172ff873" containerName="mariadb-database-create"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.548985 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.554233 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"]
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.710849 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.711133 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.711241 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.711409 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f8sm8\" (UniqueName: \"kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.711497 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.711623 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813435 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f8sm8\" (UniqueName: \"kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813522 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813559 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813583 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813610 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.813646 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.814831 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.814836 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.814929 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.815028 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.815144 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.837090 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f8sm8\" (UniqueName: \"kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8\") pod \"dnsmasq-dns-74dfc89d77-gh7kw\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") " pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:09 crc kubenswrapper[5102]: I0123 07:15:09.911444 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:10 crc kubenswrapper[5102]: I0123 07:15:10.425358 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"]
Jan 23 07:15:11 crc kubenswrapper[5102]: I0123 07:15:11.061594 5102 generic.go:334] "Generic (PLEG): container finished" podID="b36f470b-1f30-4301-9857-ee452434444f" containerID="11d4406a839cd3e3eb77349ca0c8a770fcad219c8ea44e70b0a13b5ca24db773" exitCode=0
Jan 23 07:15:11 crc kubenswrapper[5102]: I0123 07:15:11.061672 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" event={"ID":"b36f470b-1f30-4301-9857-ee452434444f","Type":"ContainerDied","Data":"11d4406a839cd3e3eb77349ca0c8a770fcad219c8ea44e70b0a13b5ca24db773"}
Jan 23 07:15:11 crc kubenswrapper[5102]: I0123 07:15:11.062185 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" event={"ID":"b36f470b-1f30-4301-9857-ee452434444f","Type":"ContainerStarted","Data":"1ba3533af6610d58a871f0abc0738b0272531476ff88ec60058ca040e51b410c"}
Jan 23 07:15:12 crc kubenswrapper[5102]: I0123 07:15:12.073371 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" event={"ID":"b36f470b-1f30-4301-9857-ee452434444f","Type":"ContainerStarted","Data":"2c4b50095b99a86d87082e3b696d20ba8fb8c83a0af28a8ec8b271719de3cc2a"}
Jan 23 07:15:12 crc kubenswrapper[5102]: I0123 07:15:12.074450 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:12 crc kubenswrapper[5102]: I0123 07:15:12.107697 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" podStartSLOduration=3.107669539 podStartE2EDuration="3.107669539s" podCreationTimestamp="2026-01-23 07:15:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:12.095851982 +0000 UTC m=+1262.916200957" watchObservedRunningTime="2026-01-23 07:15:12.107669539 +0000 UTC m=+1262.928018514"
Jan 23 07:15:13 crc kubenswrapper[5102]: I0123 07:15:13.083720 5102 generic.go:334] "Generic (PLEG): container finished" podID="b5890481-2315-483e-868e-6145bffd53c3" containerID="a2c0fa2bd612a343ce6b3fcfef0d06e46c76af03843c6c3a225b21e106594666" exitCode=0
Jan 23 07:15:13 crc kubenswrapper[5102]: I0123 07:15:13.083821 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-k7zdm" event={"ID":"b5890481-2315-483e-868e-6145bffd53c3","Type":"ContainerDied","Data":"a2c0fa2bd612a343ce6b3fcfef0d06e46c76af03843c6c3a225b21e106594666"}
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.462528 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-k7zdm"
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.612493 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxbbv\" (UniqueName: \"kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv\") pod \"b5890481-2315-483e-868e-6145bffd53c3\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") "
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.613120 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data\") pod \"b5890481-2315-483e-868e-6145bffd53c3\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") "
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.613350 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle\") pod \"b5890481-2315-483e-868e-6145bffd53c3\" (UID: \"b5890481-2315-483e-868e-6145bffd53c3\") "
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.621018 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv" (OuterVolumeSpecName: "kube-api-access-fxbbv") pod "b5890481-2315-483e-868e-6145bffd53c3" (UID: "b5890481-2315-483e-868e-6145bffd53c3"). InnerVolumeSpecName "kube-api-access-fxbbv". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.641661 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5890481-2315-483e-868e-6145bffd53c3" (UID: "b5890481-2315-483e-868e-6145bffd53c3"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.690581 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data" (OuterVolumeSpecName: "config-data") pod "b5890481-2315-483e-868e-6145bffd53c3" (UID: "b5890481-2315-483e-868e-6145bffd53c3"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.715671 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.715726 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5890481-2315-483e-868e-6145bffd53c3-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:14 crc kubenswrapper[5102]: I0123 07:15:14.715748 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxbbv\" (UniqueName: \"kubernetes.io/projected/b5890481-2315-483e-868e-6145bffd53c3-kube-api-access-fxbbv\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.110664 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-db-sync-k7zdm" event={"ID":"b5890481-2315-483e-868e-6145bffd53c3","Type":"ContainerDied","Data":"6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89"}
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.110740 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6b2a742fd5308cbdf59fe08887717183df9f45ad82275d9dfcf2cc5b1e1aba89"
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.110830 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-db-sync-k7zdm"
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.431846 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"]
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.432242 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="dnsmasq-dns" containerID="cri-o://2c4b50095b99a86d87082e3b696d20ba8fb8c83a0af28a8ec8b271719de3cc2a" gracePeriod=10
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.443640 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-hjvgh"]
Jan 23 07:15:15 crc kubenswrapper[5102]: E0123 07:15:15.444129 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5890481-2315-483e-868e-6145bffd53c3" containerName="keystone-db-sync"
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.444157 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5890481-2315-483e-868e-6145bffd53c3" containerName="keystone-db-sync"
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.444395 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5890481-2315-483e-868e-6145bffd53c3" containerName="keystone-db-sync"
Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.445154 5102 util.go:30] "No sandbox for pod can be found.
Need to start a new one" pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.457261 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.457672 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-f5hn2" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.464569 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.482524 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.483201 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.521949 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hjvgh"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.547735 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.547844 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.548013 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.548136 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.548165 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n9kd6\" (UniqueName: \"kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.548256 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.601304 5102 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.604813 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.651204 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.651282 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.651305 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n9kd6\" (UniqueName: \"kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.651343 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.651715 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.654739 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.659051 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.675324 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.675821 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " 
pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.676141 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.680403 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.683379 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.721413 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n9kd6\" (UniqueName: \"kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6\") pod \"keystone-bootstrap-hjvgh\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.740647 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.743422 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.751966 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.752249 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.757580 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.757747 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.757830 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.757959 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: 
\"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.757993 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vcplr\" (UniqueName: \"kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.758047 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.772817 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.781962 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-db-sync-d2mqk"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.783519 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.786593 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.822395 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d2mqk"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.823650 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-scb4q" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.824004 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.824219 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.844660 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-db-sync-62l6f"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.846245 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.858643 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nlrnn" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.858786 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859057 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859117 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859152 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859171 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859207 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859244 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f79qn\" (UniqueName: \"kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859281 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859308 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859336 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859359 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859392 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859419 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.859438 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vcplr\" (UniqueName: \"kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.860105 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.860359 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.861230 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.861750 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.869115 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config\") pod 
\"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.878160 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-db-sync-c2dfg"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.879134 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.908421 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.908647 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.908810 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-qkq76" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.932405 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-62l6f"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.946073 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vcplr\" (UniqueName: \"kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr\") pod \"dnsmasq-dns-5fdbfbc95f-r8vzb\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.956348 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-c2dfg"] Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964102 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964172 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8smfd\" (UniqueName: \"kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964224 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964285 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964676 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc 
kubenswrapper[5102]: I0123 07:15:15.964709 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964782 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964835 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964867 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964886 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964967 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f79qn\" (UniqueName: \"kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.964989 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.965019 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wzx6c\" (UniqueName: \"kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.967956 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.973683 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: 
\"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.974702 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.979734 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.991819 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:15 crc kubenswrapper[5102]: I0123 07:15:15.992047 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.007454 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f79qn\" (UniqueName: \"kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn\") pod \"ceilometer-0\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " pod="openstack/ceilometer-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.048858 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-db-sync-2kgk7"] Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.050475 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.055071 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-tmxnq" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.055457 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.055657 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.057766 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"] Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.058527 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066221 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066262 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066285 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066320 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066359 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066380 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f67t9\" (UniqueName: \"kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066412 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066445 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066469 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 
07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066485 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wzx6c\" (UniqueName: \"kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066515 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8smfd\" (UniqueName: \"kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.066568 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.067224 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2kgk7"] Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.075404 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.075917 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.076736 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.078397 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.083988 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.089254 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.089367 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.089367 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wzx6c\" (UniqueName: \"kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c\") pod \"barbican-db-sync-62l6f\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.090229 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8smfd\" (UniqueName: \"kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd\") pod \"neutron-db-sync-d2mqk\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.126622 5102 generic.go:334] "Generic (PLEG): container finished" podID="b36f470b-1f30-4301-9857-ee452434444f" containerID="2c4b50095b99a86d87082e3b696d20ba8fb8c83a0af28a8ec8b271719de3cc2a" exitCode=0 Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.126661 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" event={"ID":"b36f470b-1f30-4301-9857-ee452434444f","Type":"ContainerDied","Data":"2c4b50095b99a86d87082e3b696d20ba8fb8c83a0af28a8ec8b271719de3cc2a"} Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168367 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168440 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f67t9\" (UniqueName: \"kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168490 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168510 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168529 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168561 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"db-sync-config-data\" (UniqueName: 
\"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168680 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hvrm\" (UniqueName: \"kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168703 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168720 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168736 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.168926 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.174118 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.174435 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.177381 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 
07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.184906 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.187948 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f67t9\" (UniqueName: \"kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9\") pod \"cinder-db-sync-c2dfg\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") " pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.274919 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275099 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275172 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hvrm\" (UniqueName: \"kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275264 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275449 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275523 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275678 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2psvx\" (UniqueName: \"kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" 
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275827 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275938 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.275977 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.276006 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.276917 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.277351 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.280516 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.283827 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.290477 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.294044 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hvrm\" (UniqueName: \"kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm\") pod \"placement-db-sync-2kgk7\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.318493 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d2mqk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.319274 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62l6f"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.338923 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-c2dfg"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379087 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379159 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379193 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379218 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379271 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2psvx\" (UniqueName: \"kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.379423 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.380571 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.380654 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.380729 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.381340 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.381489 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.387908 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2kgk7"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.397659 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2psvx\" (UniqueName: \"kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx\") pod \"dnsmasq-dns-6f6f8cb849-5d7pk\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") " pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.423921 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.445637 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw"
Jan 23 07:15:16 crc kubenswrapper[5102]: W0123 07:15:16.487819 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6e285ca7_c27a_4db2_a96e_3f9671b1f4b7.slice/crio-b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57 WatchSource:0}: Error finding container b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57: Status 404 returned error can't find the container with id b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.489324 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-hjvgh"]
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583290 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583680 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f8sm8\" (UniqueName: \"kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583730 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583781 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583894 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.583915 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc\") pod \"b36f470b-1f30-4301-9857-ee452434444f\" (UID: \"b36f470b-1f30-4301-9857-ee452434444f\") "
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.647156 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:15:16 crc kubenswrapper[5102]: E0123 07:15:16.648148 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="dnsmasq-dns"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.651277 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="dnsmasq-dns"
Jan 23 07:15:16 crc kubenswrapper[5102]: E0123 07:15:16.651332 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="init"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.651345 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="init"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.651916 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b36f470b-1f30-4301-9857-ee452434444f" containerName="dnsmasq-dns"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.655577 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8" (OuterVolumeSpecName: "kube-api-access-f8sm8") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "kube-api-access-f8sm8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.657606 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.663601 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.663888 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.664560 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-glance-dockercfg-vxrkv"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.665882 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-scripts"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.688778 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.692378 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f8sm8\" (UniqueName: \"kubernetes.io/projected/b36f470b-1f30-4301-9857-ee452434444f-kube-api-access-f8sm8\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.712959 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.729989 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.732774 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.737509 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.739082 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.742224 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.742932 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.763832 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"]
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.767464 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config" (OuterVolumeSpecName: "config") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.768728 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.768789 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.768842 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.770137 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.770215 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be" gracePeriod=600
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.779740 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.784606 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "b36f470b-1f30-4301-9857-ee452434444f" (UID: "b36f470b-1f30-4301-9857-ee452434444f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803189 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803316 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7hvwr\" (UniqueName: \"kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803391 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803437 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803468 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803496 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803586 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5nlh4\" (UniqueName: \"kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0"
Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803634 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0"
\"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803688 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803738 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.803777 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.804176 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.804370 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.804840 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805305 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805503 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805529 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805671 5102 
reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805685 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.805699 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/b36f470b-1f30-4301-9857-ee452434444f-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911348 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7hvwr\" (UniqueName: \"kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911489 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911589 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911677 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911719 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5nlh4\" (UniqueName: \"kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911767 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911793 5102 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911813 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911851 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911874 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.911949 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.912015 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.912069 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.912089 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.912125 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.916223 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.925649 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.928013 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.928271 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.928375 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.932421 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.938375 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.940455 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.941432 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.941773 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data\") pod \"glance-default-internal-api-0\" (UID: 
\"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.948093 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.951608 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.952695 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7hvwr\" (UniqueName: \"kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.953448 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5nlh4\" (UniqueName: \"kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.955317 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.967661 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.977573 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:16 crc kubenswrapper[5102]: I0123 07:15:16.984875 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.136404 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" event={"ID":"20d9b83d-5f21-4d20-a999-5f14c97751e1","Type":"ContainerStarted","Data":"fb8eabbc8a055209527088e04e873feec020468b1e5310fa148cba7680ff47c0"} Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.141482 5102 generic.go:334] "Generic 
(PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be" exitCode=0 Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.141526 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be"} Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.141589 5102 scope.go:117] "RemoveContainer" containerID="f472221ddd8fa6fce7a56b57a18ba14ffb89ee90fa252181919bdb5177527a31" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.143746 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" event={"ID":"b36f470b-1f30-4301-9857-ee452434444f","Type":"ContainerDied","Data":"1ba3533af6610d58a871f0abc0738b0272531476ff88ec60058ca040e51b410c"} Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.143853 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-74dfc89d77-gh7kw" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.145136 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hjvgh" event={"ID":"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7","Type":"ContainerStarted","Data":"b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57"} Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.184389 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:15:17 crc kubenswrapper[5102]: W0123 07:15:17.235444 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode89c16d7_5d5b_4c9f_8b54_a43d59cf4262.slice/crio-0f50c88b41b6835a4d76349f7c33773a9cfb4233332166701e64fd389d0a1165 WatchSource:0}: Error finding container 0f50c88b41b6835a4d76349f7c33773a9cfb4233332166701e64fd389d0a1165: Status 404 returned error can't find the container with id 0f50c88b41b6835a4d76349f7c33773a9cfb4233332166701e64fd389d0a1165 Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.312811 5102 scope.go:117] "RemoveContainer" containerID="2c4b50095b99a86d87082e3b696d20ba8fb8c83a0af28a8ec8b271719de3cc2a" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.333126 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.347674 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-db-sync-2kgk7"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.375877 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.378266 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-db-sync-62l6f"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.386965 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-db-sync-d2mqk"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.394690 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-db-sync-c2dfg"] Jan 23 07:15:17 crc kubenswrapper[5102]: W0123 07:15:17.397910 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podad1791e1_86ab_44e5_99e9_399e93cffc68.slice/crio-aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb WatchSource:0}: Error finding container aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb: Status 404 returned error can't find the container with id aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.410564 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.419886 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-74dfc89d77-gh7kw"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.422896 5102 scope.go:117] "RemoveContainer" containerID="11d4406a839cd3e3eb77349ca0c8a770fcad219c8ea44e70b0a13b5ca24db773" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.543434 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:15:17 crc kubenswrapper[5102]: W0123 07:15:17.567076 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d6a2bb1_58af_4994_81ff_15562e1f9a4f.slice/crio-b825d9747ead5a361a85d9a78b2effb435535a5499cfa8dfaab0cc79f85dc971 WatchSource:0}: Error finding container b825d9747ead5a361a85d9a78b2effb435535a5499cfa8dfaab0cc79f85dc971: Status 404 returned error can't find the container with id b825d9747ead5a361a85d9a78b2effb435535a5499cfa8dfaab0cc79f85dc971 Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.615296 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b36f470b-1f30-4301-9857-ee452434444f" path="/var/lib/kubelet/pods/b36f470b-1f30-4301-9857-ee452434444f/volumes" Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.788592 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.873605 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:17 crc kubenswrapper[5102]: I0123 07:15:17.958614 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.064307 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.168208 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d2mqk" event={"ID":"19832d65-364c-4340-9109-57b179d8a14c","Type":"ContainerStarted","Data":"350918eff16d686330206ab3a6e27eb47607f9378ed6d5a60f0567ef662eb7e0"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 
07:15:18.168604 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d2mqk" event={"ID":"19832d65-364c-4340-9109-57b179d8a14c","Type":"ContainerStarted","Data":"633efd101d18e219e260b2492a8569fb89bbca8a0b054805fd08e2b6cab1339a"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.186141 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerStarted","Data":"0f50c88b41b6835a4d76349f7c33773a9cfb4233332166701e64fd389d0a1165"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.219587 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-db-sync-d2mqk" podStartSLOduration=3.219564439 podStartE2EDuration="3.219564439s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:18.199649339 +0000 UTC m=+1269.019998314" watchObservedRunningTime="2026-01-23 07:15:18.219564439 +0000 UTC m=+1269.039913414" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.233455 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.257847 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2kgk7" event={"ID":"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68","Type":"ContainerStarted","Data":"3076a0bec14bfc1b8e18ffcf8642761e06f2a0e2b2a610dec9c2365de0fa3798"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.270070 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerStarted","Data":"23f50d7f60befb7773d3c35b606fa3f365fe070a842729edc7ce2dd200475df5"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.280233 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.284203 5102 generic.go:334] "Generic (PLEG): container finished" podID="20d9b83d-5f21-4d20-a999-5f14c97751e1" containerID="559895746b5e5adfb135e1712a3ff026c310da051e819d59de52526757d7f1f6" exitCode=0 Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.284512 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" event={"ID":"20d9b83d-5f21-4d20-a999-5f14c97751e1","Type":"ContainerDied","Data":"559895746b5e5adfb135e1712a3ff026c310da051e819d59de52526757d7f1f6"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.289947 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62l6f" event={"ID":"a9080948-c87d-49da-b53e-b5228f44a2d4","Type":"ContainerStarted","Data":"7e674bd58ad4d26d4a091884ec29657435a4787a5f0541b031a4ca6a7f8a88c3"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.313028 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hjvgh" event={"ID":"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7","Type":"ContainerStarted","Data":"fab6d58a2b964d0a2c6ee9afb772804f0d0c70b36c9b83bbb60b32735ec7b64d"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.319187 5102 kubelet.go:2453] "SyncLoop (PLEG): event for 
pod" pod="openstack/cinder-db-sync-c2dfg" event={"ID":"ad1791e1-86ab-44e5-99e9-399e93cffc68","Type":"ContainerStarted","Data":"aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.330917 5102 generic.go:334] "Generic (PLEG): container finished" podID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerID="14575420a64edb55b314ff4ac6ea25e54273a0c20daa2c997107bf81eb7926fc" exitCode=0 Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.330985 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" event={"ID":"0d6a2bb1-58af-4994-81ff-15562e1f9a4f","Type":"ContainerDied","Data":"14575420a64edb55b314ff4ac6ea25e54273a0c20daa2c997107bf81eb7926fc"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.331015 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" event={"ID":"0d6a2bb1-58af-4994-81ff-15562e1f9a4f","Type":"ContainerStarted","Data":"b825d9747ead5a361a85d9a78b2effb435535a5499cfa8dfaab0cc79f85dc971"} Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.368077 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-hjvgh" podStartSLOduration=3.36805188 podStartE2EDuration="3.36805188s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:18.339934025 +0000 UTC m=+1269.160283020" watchObservedRunningTime="2026-01-23 07:15:18.36805188 +0000 UTC m=+1269.188400855" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.770266 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852373 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852434 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852481 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852522 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852610 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: 
\"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.852638 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vcplr\" (UniqueName: \"kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr\") pod \"20d9b83d-5f21-4d20-a999-5f14c97751e1\" (UID: \"20d9b83d-5f21-4d20-a999-5f14c97751e1\") " Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.866388 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr" (OuterVolumeSpecName: "kube-api-access-vcplr") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "kube-api-access-vcplr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.879752 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config" (OuterVolumeSpecName: "config") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.883382 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.891752 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.906234 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.927088 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "20d9b83d-5f21-4d20-a999-5f14c97751e1" (UID: "20d9b83d-5f21-4d20-a999-5f14c97751e1"). InnerVolumeSpecName "ovsdbserver-sb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959123 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959162 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vcplr\" (UniqueName: \"kubernetes.io/projected/20d9b83d-5f21-4d20-a999-5f14c97751e1-kube-api-access-vcplr\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959176 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959187 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959196 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:18 crc kubenswrapper[5102]: I0123 07:15:18.959204 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/20d9b83d-5f21-4d20-a999-5f14c97751e1-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.373268 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.374497 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-5fdbfbc95f-r8vzb" event={"ID":"20d9b83d-5f21-4d20-a999-5f14c97751e1","Type":"ContainerDied","Data":"fb8eabbc8a055209527088e04e873feec020468b1e5310fa148cba7680ff47c0"} Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.374587 5102 scope.go:117] "RemoveContainer" containerID="559895746b5e5adfb135e1712a3ff026c310da051e819d59de52526757d7f1f6" Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.377380 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerStarted","Data":"095b7852be22a8d955093cbd7f941913280e344a6056c936e411375713b5d0df"} Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.392074 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" event={"ID":"0d6a2bb1-58af-4994-81ff-15562e1f9a4f","Type":"ContainerStarted","Data":"d0d9ee8920f578f474cc5802e280483068b7dd4c912f6141ece58d9c34aba17d"} Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.394981 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.399927 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerStarted","Data":"a892743d843e347d3ff7721eedd2e75b5853f420cbcda5c097ed19c84bb407db"} Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.429420 5102 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" podStartSLOduration=4.429400427 podStartE2EDuration="4.429400427s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:19.420144388 +0000 UTC m=+1270.240493373" watchObservedRunningTime="2026-01-23 07:15:19.429400427 +0000 UTC m=+1270.249749402" Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.521616 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"] Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.550221 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-5fdbfbc95f-r8vzb"] Jan 23 07:15:19 crc kubenswrapper[5102]: I0123 07:15:19.678227 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20d9b83d-5f21-4d20-a999-5f14c97751e1" path="/var/lib/kubelet/pods/20d9b83d-5f21-4d20-a999-5f14c97751e1/volumes" Jan 23 07:15:20 crc kubenswrapper[5102]: I0123 07:15:20.446303 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerStarted","Data":"147306ea3d266e4ed130ad3a78e0f53bb803214c0300c3caf3cdb0930c597e1f"} Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.460584 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerStarted","Data":"98540ccd719700096336a8828100db6e97934cb9f3593207017db4325e966090"} Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.460791 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-log" containerID="cri-o://a892743d843e347d3ff7721eedd2e75b5853f420cbcda5c097ed19c84bb407db" gracePeriod=30 Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.461514 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-httpd" containerID="cri-o://98540ccd719700096336a8828100db6e97934cb9f3593207017db4325e966090" gracePeriod=30 Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.470640 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-log" containerID="cri-o://147306ea3d266e4ed130ad3a78e0f53bb803214c0300c3caf3cdb0930c597e1f" gracePeriod=30 Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.470957 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerStarted","Data":"9953f3268776e0d21efc7ee7fe6cb655efd52cd7160e04454a1ed9b4ec3bb613"} Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.471056 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-httpd" containerID="cri-o://9953f3268776e0d21efc7ee7fe6cb655efd52cd7160e04454a1ed9b4ec3bb613" gracePeriod=30 Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.497078 5102 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=6.497044977 podStartE2EDuration="6.497044977s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:21.484708544 +0000 UTC m=+1272.305057519" watchObservedRunningTime="2026-01-23 07:15:21.497044977 +0000 UTC m=+1272.317393962" Jan 23 07:15:21 crc kubenswrapper[5102]: I0123 07:15:21.520034 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=6.520008022 podStartE2EDuration="6.520008022s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:21.511016342 +0000 UTC m=+1272.331365327" watchObservedRunningTime="2026-01-23 07:15:21.520008022 +0000 UTC m=+1272.340356997" Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.489358 5102 generic.go:334] "Generic (PLEG): container finished" podID="c3253369-4128-4424-87c0-4cea6e376eb0" containerID="98540ccd719700096336a8828100db6e97934cb9f3593207017db4325e966090" exitCode=0 Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.489911 5102 generic.go:334] "Generic (PLEG): container finished" podID="c3253369-4128-4424-87c0-4cea6e376eb0" containerID="a892743d843e347d3ff7721eedd2e75b5853f420cbcda5c097ed19c84bb407db" exitCode=143 Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.489441 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerDied","Data":"98540ccd719700096336a8828100db6e97934cb9f3593207017db4325e966090"} Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.490168 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerDied","Data":"a892743d843e347d3ff7721eedd2e75b5853f420cbcda5c097ed19c84bb407db"} Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.496658 5102 generic.go:334] "Generic (PLEG): container finished" podID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerID="9953f3268776e0d21efc7ee7fe6cb655efd52cd7160e04454a1ed9b4ec3bb613" exitCode=0 Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.496676 5102 generic.go:334] "Generic (PLEG): container finished" podID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerID="147306ea3d266e4ed130ad3a78e0f53bb803214c0300c3caf3cdb0930c597e1f" exitCode=143 Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.496700 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerDied","Data":"9953f3268776e0d21efc7ee7fe6cb655efd52cd7160e04454a1ed9b4ec3bb613"} Jan 23 07:15:22 crc kubenswrapper[5102]: I0123 07:15:22.496733 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerDied","Data":"147306ea3d266e4ed130ad3a78e0f53bb803214c0300c3caf3cdb0930c597e1f"} Jan 23 07:15:23 crc kubenswrapper[5102]: I0123 07:15:23.511509 5102 generic.go:334] "Generic (PLEG): container finished" podID="6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" 
containerID="fab6d58a2b964d0a2c6ee9afb772804f0d0c70b36c9b83bbb60b32735ec7b64d" exitCode=0 Jan 23 07:15:23 crc kubenswrapper[5102]: I0123 07:15:23.511622 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hjvgh" event={"ID":"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7","Type":"ContainerDied","Data":"fab6d58a2b964d0a2c6ee9afb772804f0d0c70b36c9b83bbb60b32735ec7b64d"} Jan 23 07:15:26 crc kubenswrapper[5102]: I0123 07:15:26.426659 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:15:26 crc kubenswrapper[5102]: I0123 07:15:26.494007 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:15:26 crc kubenswrapper[5102]: I0123 07:15:26.494339 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" containerID="cri-o://784e454f669a640e0d691a762f7f2be1680040e25c08425b68c7dfbfbec03c66" gracePeriod=10 Jan 23 07:15:27 crc kubenswrapper[5102]: I0123 07:15:27.572524 5102 generic.go:334] "Generic (PLEG): container finished" podID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerID="784e454f669a640e0d691a762f7f2be1680040e25c08425b68c7dfbfbec03c66" exitCode=0 Jan 23 07:15:27 crc kubenswrapper[5102]: I0123 07:15:27.572573 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" event={"ID":"1de6d484-6cfc-4529-a911-9ee8058ae867","Type":"ContainerDied","Data":"784e454f669a640e0d691a762f7f2be1680040e25c08425b68c7dfbfbec03c66"} Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.470447 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.471488 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.585704 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-hjvgh" event={"ID":"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7","Type":"ContainerDied","Data":"b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57"} Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.585755 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b6d0a210e903800a2d0ca0fe6c2aeeaab20eb7fc0b977e7764c64b8d0658fd57" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.585717 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-hjvgh" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.595291 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"c3253369-4128-4424-87c0-4cea6e376eb0","Type":"ContainerDied","Data":"23f50d7f60befb7773d3c35b606fa3f365fe070a842729edc7ce2dd200475df5"} Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.595399 5102 scope.go:117] "RemoveContainer" containerID="98540ccd719700096336a8828100db6e97934cb9f3593207017db4325e966090" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.595412 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.630378 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.630883 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631066 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631154 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-internal-tls-certs\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631196 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n9kd6\" (UniqueName: \"kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631235 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631290 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631324 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631346 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631393 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: 
\"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631550 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631591 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys\") pod \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\" (UID: \"6e285ca7-c27a-4db2-a96e-3f9671b1f4b7\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631615 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7hvwr\" (UniqueName: \"kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.631634 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"c3253369-4128-4424-87c0-4cea6e376eb0\" (UID: \"c3253369-4128-4424-87c0-4cea6e376eb0\") " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.633418 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs" (OuterVolumeSpecName: "logs") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.634629 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.639512 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.641614 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.646373 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "fernet-keys". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.651394 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts" (OuterVolumeSpecName: "scripts") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.662439 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts" (OuterVolumeSpecName: "scripts") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.667044 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr" (OuterVolumeSpecName: "kube-api-access-7hvwr") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "kube-api-access-7hvwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.675386 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6" (OuterVolumeSpecName: "kube-api-access-n9kd6") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "kube-api-access-n9kd6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.690787 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data" (OuterVolumeSpecName: "config-data") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.710365 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" (UID: "6e285ca7-c27a-4db2-a96e-3f9671b1f4b7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.714130 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data" (OuterVolumeSpecName: "config-data") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733701 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733726 5102 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733738 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7hvwr\" (UniqueName: \"kubernetes.io/projected/c3253369-4128-4424-87c0-4cea6e376eb0-kube-api-access-7hvwr\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733767 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733777 5102 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733785 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733794 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733802 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n9kd6\" (UniqueName: \"kubernetes.io/projected/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-kube-api-access-n9kd6\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733809 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733817 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733825 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.733836 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/c3253369-4128-4424-87c0-4cea6e376eb0-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.747448 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.753774 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "c3253369-4128-4424-87c0-4cea6e376eb0" (UID: "c3253369-4128-4424-87c0-4cea6e376eb0"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.758119 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.836850 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.836906 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.836922 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/c3253369-4128-4424-87c0-4cea6e376eb0-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.938444 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.947275 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.963632 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:28 crc kubenswrapper[5102]: E0123 07:15:28.964174 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" containerName="keystone-bootstrap" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964197 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" containerName="keystone-bootstrap" Jan 23 07:15:28 crc kubenswrapper[5102]: E0123 07:15:28.964218 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-log" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964226 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-log" Jan 23 07:15:28 crc kubenswrapper[5102]: E0123 07:15:28.964243 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20d9b83d-5f21-4d20-a999-5f14c97751e1" containerName="init" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964250 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20d9b83d-5f21-4d20-a999-5f14c97751e1" containerName="init" Jan 23 07:15:28 crc kubenswrapper[5102]: E0123 07:15:28.964265 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-httpd" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964276 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-httpd" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964444 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-httpd" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964470 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" containerName="glance-log" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964483 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" containerName="keystone-bootstrap" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.964504 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20d9b83d-5f21-4d20-a999-5f14c97751e1" containerName="init" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.965682 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.971796 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.972102 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc" Jan 23 07:15:28 crc kubenswrapper[5102]: I0123 07:15:28.977655 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.142669 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.142755 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.142787 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.143075 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tvbc6\" (UniqueName: \"kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.143153 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs\") pod \"glance-default-internal-api-0\" (UID: 
\"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.143277 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.143409 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.143699 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.245743 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.245877 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.245909 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.245933 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.245979 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tvbc6\" (UniqueName: \"kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.246004 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " 
pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.246041 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.246089 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.246340 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.247180 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.250924 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.254111 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.255365 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.255994 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.264078 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tvbc6\" (UniqueName: \"kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.278220 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.286109 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.587623 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.631500 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c3253369-4128-4424-87c0-4cea6e376eb0" path="/var/lib/kubelet/pods/c3253369-4128-4424-87c0-4cea6e376eb0/volumes" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.638048 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-hjvgh"] Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.655636 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-hjvgh"] Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.690479 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-bootstrap-zqc5s"] Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.692290 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.699495 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"osp-secret" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.699652 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-f5hn2" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.699848 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.699922 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.700526 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.718696 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zqc5s"] Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.758086 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.758290 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 
07:15:29.758407 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.758444 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.758477 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.758631 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hpnfz\" (UniqueName: \"kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861259 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861466 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861499 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861521 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861580 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hpnfz\" (UniqueName: \"kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.861660 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.872042 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.872435 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.872567 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.873165 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.877108 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:29 crc kubenswrapper[5102]: I0123 07:15:29.882109 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hpnfz\" (UniqueName: \"kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz\") pod \"keystone-bootstrap-zqc5s\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:30 crc kubenswrapper[5102]: I0123 07:15:30.020941 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:31 crc kubenswrapper[5102]: I0123 07:15:31.620336 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6e285ca7-c27a-4db2-a96e-3f9671b1f4b7" path="/var/lib/kubelet/pods/6e285ca7-c27a-4db2-a96e-3f9671b1f4b7/volumes" Jan 23 07:15:33 crc kubenswrapper[5102]: I0123 07:15:33.446451 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Jan 23 07:15:38 crc kubenswrapper[5102]: I0123 07:15:38.447858 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Jan 23 07:15:39 crc kubenswrapper[5102]: I0123 07:15:39.787468 5102 scope.go:117] "RemoveContainer" containerID="a892743d843e347d3ff7721eedd2e75b5853f420cbcda5c097ed19c84bb407db" Jan 23 07:15:39 crc kubenswrapper[5102]: E0123 07:15:39.822123 5102 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying config: context canceled" image="quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49" Jan 23 07:15:39 crc kubenswrapper[5102]: E0123 07:15:39.822324 5102 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:cinder-db-sync,Image:quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49,Command:[/bin/bash],Args:[-c /usr/local/bin/kolla_set_configs && 
/usr/local/bin/kolla_start],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{EnvVar{Name:KOLLA_BOOTSTRAP,Value:TRUE,ValueFrom:nil,},EnvVar{Name:KOLLA_CONFIG_STRATEGY,Value:COPY_ALWAYS,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:etc-machine-id,ReadOnly:true,MountPath:/etc/machine-id,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:scripts,ReadOnly:true,MountPath:/usr/local/bin/container-scripts,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/config-data/merged,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/etc/my.cnf,SubPath:my.cnf,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:db-sync-config-data,ReadOnly:true,MountPath:/etc/cinder/cinder.conf.d,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:config-data,ReadOnly:true,MountPath:/var/lib/kolla/config_files/config.json,SubPath:db-sync-config.json,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:combined-ca-bundle,ReadOnly:true,MountPath:/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem,SubPath:tls-ca-bundle.pem,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-f67t9,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:nil,Privileged:nil,SELinuxOptions:nil,RunAsUser:*0,RunAsNonRoot:nil,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:nil,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:File,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod cinder-db-sync-c2dfg_openstack(ad1791e1-86ab-44e5-99e9-399e93cffc68): ErrImagePull: rpc error: code = Canceled desc = copying config: context canceled" logger="UnhandledError" Jan 23 07:15:39 crc kubenswrapper[5102]: E0123 07:15:39.823552 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ErrImagePull: \"rpc error: code = Canceled desc = copying config: context canceled\"" pod="openstack/cinder-db-sync-c2dfg" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.066144 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.072677 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.211648 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212218 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212438 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212532 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212596 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5nlh4\" (UniqueName: \"kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212674 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212743 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212799 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212844 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212886 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc 
kubenswrapper[5102]: I0123 07:15:40.212912 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58nnb\" (UniqueName: \"kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212959 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.212994 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb\") pod \"1de6d484-6cfc-4529-a911-9ee8058ae867\" (UID: \"1de6d484-6cfc-4529-a911-9ee8058ae867\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.213086 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data\") pod \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\" (UID: \"d1238958-d52a-46bc-8a2b-1f9f0452b10e\") " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.213091 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.215231 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.215655 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs" (OuterVolumeSpecName: "logs") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.220659 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts" (OuterVolumeSpecName: "scripts") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.225903 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4" (OuterVolumeSpecName: "kube-api-access-5nlh4") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "kube-api-access-5nlh4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.226232 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb" (OuterVolumeSpecName: "kube-api-access-58nnb") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "kube-api-access-58nnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.233911 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.259386 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.261253 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.270267 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config" (OuterVolumeSpecName: "config") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.281589 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data" (OuterVolumeSpecName: "config-data") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.286985 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.288396 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "d1238958-d52a-46bc-8a2b-1f9f0452b10e" (UID: "d1238958-d52a-46bc-8a2b-1f9f0452b10e"). InnerVolumeSpecName "public-tls-certs". 
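[Editorial note] The unmount entries above follow a fixed ordering: every pod-level volume gets an UnmountVolume.TearDown, ordinary volumes are then reported "Volume detached", and the one device-backed volume here (the "glance" volume on local-storage11-crc) additionally goes through UnmountDevice before its own final "Volume detached" line. The toy Go program below is a hypothetical model of that ordering as read off this log, not kubelet source; all names are illustrative.

package main

import "fmt"

// volume is an illustrative stand-in for a pod volume in the entries above;
// deviceBacked marks plugins such as kubernetes.io/local-volume that also
// need a node-level unmount.
type volume struct {
	name         string
	deviceBacked bool
}

func reconcileTeardown(vols []volume) {
	// 1. Pod-level unmount (UnmountVolume.TearDown) for every volume.
	for _, v := range vols {
		fmt.Printf("TearDown succeeded for volume %q\n", v.name)
	}
	// 2. Volumes without a device mount are reported detached right away.
	for _, v := range vols {
		if !v.deviceBacked {
			fmt.Printf("Volume detached for volume %q\n", v.name)
		}
	}
	// 3. Device-backed volumes go through UnmountDevice before their final
	//    "Volume detached" line, as local-storage11-crc does in this log.
	for _, v := range vols {
		if v.deviceBacked {
			fmt.Printf("UnmountDevice succeeded for volume %q\n", v.name)
			fmt.Printf("Volume detached for volume %q\n", v.name)
		}
	}
}

func main() {
	reconcileTeardown([]volume{
		{name: "httpd-run"},
		{name: "scripts"},
		{name: "glance", deviceBacked: true}, // backed by local-storage11-crc
	})
}

The separate device step reflects that a node-level mount can outlive a single pod's use of it, so the kubelet only unmounts the device once no pod-level mount remains.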
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.293309 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.297556 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1de6d484-6cfc-4529-a911-9ee8058ae867" (UID: "1de6d484-6cfc-4529-a911-9ee8058ae867"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317482 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317550 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5nlh4\" (UniqueName: \"kubernetes.io/projected/d1238958-d52a-46bc-8a2b-1f9f0452b10e-kube-api-access-5nlh4\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317592 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317604 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317616 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317630 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317639 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d1238958-d52a-46bc-8a2b-1f9f0452b10e-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317649 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58nnb\" (UniqueName: \"kubernetes.io/projected/1de6d484-6cfc-4529-a911-9ee8058ae867-kube-api-access-58nnb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317661 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317671 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-ovsdbserver-nb\") on node 
\"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317686 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317698 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d1238958-d52a-46bc-8a2b-1f9f0452b10e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.317709 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1de6d484-6cfc-4529-a911-9ee8058ae867-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.339875 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.363574 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-bootstrap-zqc5s"] Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.419305 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.563293 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.750712 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zqc5s" event={"ID":"5dcb8a22-2a42-4baf-8b27-7041f960563c","Type":"ContainerStarted","Data":"5078354f12a1faec162881f83a1d700354f0688d65cca708e2c62816bdad5e58"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.751216 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zqc5s" event={"ID":"5dcb8a22-2a42-4baf-8b27-7041f960563c","Type":"ContainerStarted","Data":"c519bc978abc2ae44d20752cfdef86f62da801ee0a5307181a31ea0f93059d4f"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.768946 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerStarted","Data":"91f6632c5ebfcbf1dfd15e1801623e834944f0f73a77ff987834985e15a4d1ac"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.773158 5102 generic.go:334] "Generic (PLEG): container finished" podID="19832d65-364c-4340-9109-57b179d8a14c" containerID="350918eff16d686330206ab3a6e27eb47607f9378ed6d5a60f0567ef662eb7e0" exitCode=0 Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.773225 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d2mqk" event={"ID":"19832d65-364c-4340-9109-57b179d8a14c","Type":"ContainerDied","Data":"350918eff16d686330206ab3a6e27eb47607f9378ed6d5a60f0567ef662eb7e0"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.783280 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-bootstrap-zqc5s" podStartSLOduration=11.783257075 podStartE2EDuration="11.783257075s" podCreationTimestamp="2026-01-23 07:15:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-23 07:15:40.780891421 +0000 UTC m=+1291.601240396" watchObservedRunningTime="2026-01-23 07:15:40.783257075 +0000 UTC m=+1291.603606050" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.787534 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerStarted","Data":"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.794038 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" event={"ID":"1de6d484-6cfc-4529-a911-9ee8058ae867","Type":"ContainerDied","Data":"916d0db5896dacb2f6c9e0808503a29787b7a335b1e9f04f871b9d66b148eff0"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.794080 5102 scope.go:117] "RemoveContainer" containerID="784e454f669a640e0d691a762f7f2be1680040e25c08425b68c7dfbfbec03c66" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.794235 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.807010 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2kgk7" event={"ID":"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68","Type":"ContainerStarted","Data":"72ddf8ff49e5a3193c48bebbeb2acb4f025af8aea7f170ec0287cc32ad8e0c2e"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.827167 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"d1238958-d52a-46bc-8a2b-1f9f0452b10e","Type":"ContainerDied","Data":"095b7852be22a8d955093cbd7f941913280e344a6056c936e411375713b5d0df"} Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.827894 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.839606 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-db-sync-2kgk7" podStartSLOduration=3.409115247 podStartE2EDuration="25.839575337s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="2026-01-23 07:15:17.333627981 +0000 UTC m=+1268.153976956" lastFinishedPulling="2026-01-23 07:15:39.764088061 +0000 UTC m=+1290.584437046" observedRunningTime="2026-01-23 07:15:40.827004457 +0000 UTC m=+1291.647353432" watchObservedRunningTime="2026-01-23 07:15:40.839575337 +0000 UTC m=+1291.659924332" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.851891 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62l6f" event={"ID":"a9080948-c87d-49da-b53e-b5228f44a2d4","Type":"ContainerStarted","Data":"1088d9848b17f47b3d88ce07e62e66b5442e4c6d98cebfd472e902a697152235"} Jan 23 07:15:40 crc kubenswrapper[5102]: E0123 07:15:40.863988 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cinder-db-sync\" with ImagePullBackOff: \"Back-off pulling image \\\"quay.io/podified-antelope-centos9/openstack-cinder-api@sha256:b59b7445e581cc720038107e421371c86c5765b2967e77d884ef29b1d9fd0f49\\\"\"" pod="openstack/cinder-db-sync-c2dfg" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.874988 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-db-sync-62l6f" podStartSLOduration=3.412695949 podStartE2EDuration="25.874968449s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="2026-01-23 07:15:17.381909673 +0000 UTC m=+1268.202258648" lastFinishedPulling="2026-01-23 07:15:39.844182163 +0000 UTC m=+1290.664531148" observedRunningTime="2026-01-23 07:15:40.87178343 +0000 UTC m=+1291.692132405" watchObservedRunningTime="2026-01-23 07:15:40.874968449 +0000 UTC m=+1291.695317424" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.880101 5102 scope.go:117] "RemoveContainer" containerID="741cc1b445f0292f2345423583736ecbf8ebe8026251380cadfbaf2d7229e678" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.923595 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.943617 5102 scope.go:117] "RemoveContainer" containerID="9953f3268776e0d21efc7ee7fe6cb655efd52cd7160e04454a1ed9b4ec3bb613" Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.946611 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-8db84466c-tqtcx"] Jan 23 07:15:40 crc kubenswrapper[5102]: I0123 07:15:40.991306 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.002709 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011103 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:41 crc kubenswrapper[5102]: E0123 07:15:41.011525 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011560 5102 state_mem.go:107] "Deleted CPUSet 
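[Editorial note] The "Observed pod startup duration" entries above carry a small piece of arithmetic worth making explicit: podStartSLOduration is the end-to-end startup time minus the image-pull window (lastFinishedPulling - firstStartedPulling). For placement-db-sync-2kgk7 that is 25.839575337s - (07:15:39.764088061 - 07:15:17.333627981). The sketch below (not kubelet code) checks it with the exact values from the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	const layout = "2006-01-02 15:04:05.999999999 -0700 MST"
	// Timestamps copied verbatim from the placement-db-sync-2kgk7 entry above.
	firstPull, _ := time.Parse(layout, "2026-01-23 07:15:17.333627981 +0000 UTC")
	lastPull, _ := time.Parse(layout, "2026-01-23 07:15:39.764088061 +0000 UTC")
	e2e, _ := time.ParseDuration("25.839575337s") // podStartE2EDuration

	slo := e2e - lastPull.Sub(firstPull)
	// Prints 3.409115257s, agreeing with the logged
	// podStartSLOduration=3.409115247 up to nanosecond-level rounding.
	fmt.Println(slo)
}

The keystone-bootstrap-zqc5s entry earlier shows the degenerate case: with both pull timestamps at the zero value, no pull time is subtracted and the SLO duration equals the E2E duration.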
assignment" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" Jan 23 07:15:41 crc kubenswrapper[5102]: E0123 07:15:41.011573 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="init" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011580 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="init" Jan 23 07:15:41 crc kubenswrapper[5102]: E0123 07:15:41.011600 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-httpd" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011609 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-httpd" Jan 23 07:15:41 crc kubenswrapper[5102]: E0123 07:15:41.011620 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-log" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011626 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-log" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011791 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011804 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-httpd" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.011820 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" containerName="glance-log" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.012835 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.017033 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.017770 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.035082 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.048801 5102 scope.go:117] "RemoveContainer" containerID="147306ea3d266e4ed130ad3a78e0f53bb803214c0300c3caf3cdb0930c597e1f" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154264 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154346 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154411 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154522 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154575 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.154910 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wdcqn\" (UniqueName: \"kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.155032 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " 
pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.155177 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257580 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257682 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257742 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257824 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257873 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.257956 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wdcqn\" (UniqueName: \"kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.258007 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.258052 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc 
kubenswrapper[5102]: I0123 07:15:41.258502 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.258944 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.259425 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.268449 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.273397 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.276658 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.278629 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.287672 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wdcqn\" (UniqueName: \"kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.301222 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") " pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.343072 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.613989 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" path="/var/lib/kubelet/pods/1de6d484-6cfc-4529-a911-9ee8058ae867/volumes" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.614786 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1238958-d52a-46bc-8a2b-1f9f0452b10e" path="/var/lib/kubelet/pods/d1238958-d52a-46bc-8a2b-1f9f0452b10e/volumes" Jan 23 07:15:41 crc kubenswrapper[5102]: I0123 07:15:41.865413 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerStarted","Data":"c176635075d963258985742562876369611ebdde0a76ef4ae19d6651aea95d35"} Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.327884 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.486760 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle\") pod \"19832d65-364c-4340-9109-57b179d8a14c\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.486925 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8smfd\" (UniqueName: \"kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd\") pod \"19832d65-364c-4340-9109-57b179d8a14c\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.486955 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config\") pod \"19832d65-364c-4340-9109-57b179d8a14c\" (UID: \"19832d65-364c-4340-9109-57b179d8a14c\") " Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.498799 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd" (OuterVolumeSpecName: "kube-api-access-8smfd") pod "19832d65-364c-4340-9109-57b179d8a14c" (UID: "19832d65-364c-4340-9109-57b179d8a14c"). InnerVolumeSpecName "kube-api-access-8smfd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.527579 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "19832d65-364c-4340-9109-57b179d8a14c" (UID: "19832d65-364c-4340-9109-57b179d8a14c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.532662 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config" (OuterVolumeSpecName: "config") pod "19832d65-364c-4340-9109-57b179d8a14c" (UID: "19832d65-364c-4340-9109-57b179d8a14c"). InnerVolumeSpecName "config". 
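[Editorial note] The mount entries for the replacement glance-default-external-api-0 pod further above are the mirror image of the earlier teardown: VerifyControllerAttachedVolume for each volume, a single MountVolume.MountDevice for the local PV at its device path ("/mnt/openstack/pv11" in the log), then per-volume MountVolume.SetUp. A minimal sketch of that ordering, again a hypothetical model rather than kubelet source:

package main

import "fmt"

func main() {
	// Volume names copied from the glance-default-external-api-0 entries
	// above; the ordering of the three phases is the point here.
	vols := []string{"httpd-run", "combined-ca-bundle", "local-storage11-crc",
		"logs", "scripts", "kube-api-access-wdcqn", "config-data", "public-tls-certs"}

	for _, v := range vols {
		fmt.Printf("VerifyControllerAttachedVolume started for volume %q\n", v)
	}
	// The device-backed PV is mounted once at its device path before its
	// per-pod SetUp completes.
	fmt.Println(`MountDevice succeeded for "local-storage11-crc" at "/mnt/openstack/pv11"`)
	for _, v := range vols {
		fmt.Printf("MountVolume.SetUp succeeded for volume %q\n", v)
	}
}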
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.595201 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.595790 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8smfd\" (UniqueName: \"kubernetes.io/projected/19832d65-364c-4340-9109-57b179d8a14c-kube-api-access-8smfd\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.595822 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/19832d65-364c-4340-9109-57b179d8a14c-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:42 crc kubenswrapper[5102]: I0123 07:15:42.600506 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.061531 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerStarted","Data":"c480f251b694bffcbb907c68fef63823aa9c08b64fa91b755fe32e8aef4ed094"} Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.085012 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-db-sync-d2mqk" event={"ID":"19832d65-364c-4340-9109-57b179d8a14c","Type":"ContainerDied","Data":"633efd101d18e219e260b2492a8569fb89bbca8a0b054805fd08e2b6cab1339a"} Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.085058 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="633efd101d18e219e260b2492a8569fb89bbca8a0b054805fd08e2b6cab1339a" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.085148 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-db-sync-d2mqk" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.103478 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerStarted","Data":"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b"} Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.175314 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:43 crc kubenswrapper[5102]: E0123 07:15:43.175759 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="19832d65-364c-4340-9109-57b179d8a14c" containerName="neutron-db-sync" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.175779 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="19832d65-364c-4340-9109-57b179d8a14c" containerName="neutron-db-sync" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.175953 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="19832d65-364c-4340-9109-57b179d8a14c" containerName="neutron-db-sync" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.178058 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.206729 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.290773 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"] Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.298132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.304999 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-ovndbs" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.305285 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-config" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.305394 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-httpd-config" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.305491 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-neutron-dockercfg-scb4q" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.313362 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"] Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341326 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341403 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341520 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341580 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341615 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lpz25\" (UniqueName: \"kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341818 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341896 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341930 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341964 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.341998 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zqntl\" (UniqueName: \"kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.342042 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443397 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443553 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443602 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-lpz25\" (UniqueName: \"kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443663 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443712 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443745 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443775 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443802 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zqntl\" (UniqueName: \"kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443862 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.443887 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.445046 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.446661 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.446830 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.446990 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.447935 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.450302 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/dnsmasq-dns-8db84466c-tqtcx" podUID="1de6d484-6cfc-4529-a911-9ee8058ae867" containerName="dnsmasq-dns" probeResult="failure" output="dial tcp 10.217.0.128:5353: i/o timeout" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.460819 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.462677 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.463103 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.463623 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.467452 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lpz25\" (UniqueName: \"kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25\") pod \"neutron-7f649ddc48-2nj2r\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") " pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 
07:15:43.488403 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zqntl\" (UniqueName: \"kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl\") pod \"dnsmasq-dns-685444497c-xk8fn\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.531757 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:43 crc kubenswrapper[5102]: I0123 07:15:43.657993 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.116665 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.120882 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerStarted","Data":"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"} Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.123071 5102 generic.go:334] "Generic (PLEG): container finished" podID="6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" containerID="72ddf8ff49e5a3193c48bebbeb2acb4f025af8aea7f170ec0287cc32ad8e0c2e" exitCode=0 Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.123175 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2kgk7" event={"ID":"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68","Type":"ContainerDied","Data":"72ddf8ff49e5a3193c48bebbeb2acb4f025af8aea7f170ec0287cc32ad8e0c2e"} Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.126344 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerStarted","Data":"892105c146ee06febdf3ffc361005cc973d3bb16201b390dd4dfdc6f24ca8ed0"} Jan 23 07:15:44 crc kubenswrapper[5102]: W0123 07:15:44.139732 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poddff27ff3_187a_4e1d_9505_d02ab69bc244.slice/crio-877157e21384eb2dd0ef1ed03882e1f4708bcd1071f40580f39d6d16e95bae14 WatchSource:0}: Error finding container 877157e21384eb2dd0ef1ed03882e1f4708bcd1071f40580f39d6d16e95bae14: Status 404 returned error can't find the container with id 877157e21384eb2dd0ef1ed03882e1f4708bcd1071f40580f39d6d16e95bae14 Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.178920 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=16.178900892 podStartE2EDuration="16.178900892s" podCreationTimestamp="2026-01-23 07:15:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:44.166112793 +0000 UTC m=+1294.986461768" watchObservedRunningTime="2026-01-23 07:15:44.178900892 +0000 UTC m=+1294.999249867" Jan 23 07:15:44 crc kubenswrapper[5102]: I0123 07:15:44.319860 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"] Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.138128 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" 
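[Editorial note] The readiness probe failure recorded above ("dial tcp 10.217.0.128:5353: i/o timeout" for the old dnsmasq-dns-8db84466c-tqtcx pod) amounts to a TCP dial against a pod IP whose sandbox is already gone. The sketch below is an assumed illustration, not the kubelet prober; the address and port come from the log line, while the 1s timeout is an assumption for demonstration:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	conn, err := net.DialTimeout("tcp", "10.217.0.128:5353", 1*time.Second)
	if err != nil {
		// e.g. "dial tcp 10.217.0.128:5353: i/o timeout"
		fmt.Println("probe failed:", err)
		return
	}
	defer conn.Close()
	fmt.Println("probe succeeded")
}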
event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerStarted","Data":"40c6f25d79f4df1418343eb41d1ad7f6f123243049310ace1c310db6af2c2917"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.138702 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.138749 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerStarted","Data":"ecdec7d2302573b8754f4553efc7bcdaf8c97a42c627b3ecd9ab01aabac92543"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.138771 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerStarted","Data":"8de16909c1909389452e97e3e2a20f5486bd00f9751d4695b6832aae28c95644"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.140883 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerStarted","Data":"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.144608 5102 generic.go:334] "Generic (PLEG): container finished" podID="a9080948-c87d-49da-b53e-b5228f44a2d4" containerID="1088d9848b17f47b3d88ce07e62e66b5442e4c6d98cebfd472e902a697152235" exitCode=0 Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.144698 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-db-sync-62l6f" event={"ID":"a9080948-c87d-49da-b53e-b5228f44a2d4","Type":"ContainerDied","Data":"1088d9848b17f47b3d88ce07e62e66b5442e4c6d98cebfd472e902a697152235"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.149518 5102 generic.go:334] "Generic (PLEG): container finished" podID="5dcb8a22-2a42-4baf-8b27-7041f960563c" containerID="5078354f12a1faec162881f83a1d700354f0688d65cca708e2c62816bdad5e58" exitCode=0 Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.149623 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zqc5s" event={"ID":"5dcb8a22-2a42-4baf-8b27-7041f960563c","Type":"ContainerDied","Data":"5078354f12a1faec162881f83a1d700354f0688d65cca708e2c62816bdad5e58"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.155274 5102 generic.go:334] "Generic (PLEG): container finished" podID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerID="4a4473f4f723a6ec8f3a04c25492e0febaa16f4a70829cb4f282a8c866dcb8cb" exitCode=0 Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.155531 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-xk8fn" event={"ID":"dff27ff3-187a-4e1d-9505-d02ab69bc244","Type":"ContainerDied","Data":"4a4473f4f723a6ec8f3a04c25492e0febaa16f4a70829cb4f282a8c866dcb8cb"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.155681 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-xk8fn" event={"ID":"dff27ff3-187a-4e1d-9505-d02ab69bc244","Type":"ContainerStarted","Data":"877157e21384eb2dd0ef1ed03882e1f4708bcd1071f40580f39d6d16e95bae14"} Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.166673 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-7f649ddc48-2nj2r" podStartSLOduration=2.166643398 podStartE2EDuration="2.166643398s" podCreationTimestamp="2026-01-23 
07:15:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:45.161605251 +0000 UTC m=+1295.981954226" watchObservedRunningTime="2026-01-23 07:15:45.166643398 +0000 UTC m=+1295.986992373" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.302660 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.302619459 podStartE2EDuration="5.302619459s" podCreationTimestamp="2026-01-23 07:15:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:45.265217776 +0000 UTC m=+1296.085566741" watchObservedRunningTime="2026-01-23 07:15:45.302619459 +0000 UTC m=+1296.122968424" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.517325 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.602144 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle\") pod \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.603297 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs" (OuterVolumeSpecName: "logs") pod "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" (UID: "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.603776 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs\") pod \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.603852 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts\") pod \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.605526 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data\") pod \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.606074 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hvrm\" (UniqueName: \"kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm\") pod \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\" (UID: \"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68\") " Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.607462 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.610139 5102 operation_generator.go:803] 
UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts" (OuterVolumeSpecName: "scripts") pod "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" (UID: "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.611467 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm" (OuterVolumeSpecName: "kube-api-access-5hvrm") pod "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" (UID: "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68"). InnerVolumeSpecName "kube-api-access-5hvrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.629987 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data" (OuterVolumeSpecName: "config-data") pod "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" (UID: "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.632483 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" (UID: "6b555d5d-9388-40a6-b4c5-7d0edd8c3e68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.709870 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.709925 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hvrm\" (UniqueName: \"kubernetes.io/projected/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-kube-api-access-5hvrm\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.709942 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:45 crc kubenswrapper[5102]: I0123 07:15:45.709957 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.196257 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-db-sync-2kgk7" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.196296 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-db-sync-2kgk7" event={"ID":"6b555d5d-9388-40a6-b4c5-7d0edd8c3e68","Type":"ContainerDied","Data":"3076a0bec14bfc1b8e18ffcf8642761e06f2a0e2b2a610dec9c2365de0fa3798"} Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.197212 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3076a0bec14bfc1b8e18ffcf8642761e06f2a0e2b2a610dec9c2365de0fa3798" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.199694 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-xk8fn" event={"ID":"dff27ff3-187a-4e1d-9505-d02ab69bc244","Type":"ContainerStarted","Data":"d0c2c8a828bd6e93400e9785c674f6ed4635c1d0b13a9171d774dd815910659a"} Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.200028 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.220447 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-685444497c-xk8fn" podStartSLOduration=3.220426329 podStartE2EDuration="3.220426329s" podCreationTimestamp="2026-01-23 07:15:43 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:46.218126258 +0000 UTC m=+1297.038475223" watchObservedRunningTime="2026-01-23 07:15:46.220426329 +0000 UTC m=+1297.040775304" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.332813 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:15:46 crc kubenswrapper[5102]: E0123 07:15:46.333223 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" containerName="placement-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.333241 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" containerName="placement-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.333433 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" containerName="placement-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.334375 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.339636 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-scripts" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.339945 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-placement-dockercfg-tmxnq" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.340101 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-public-svc" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.340283 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-placement-internal-svc" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.341189 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-config-data" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.366729 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427590 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427642 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427700 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427731 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427751 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427780 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j4kdl\" (UniqueName: \"kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.427830 5102 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529056 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529113 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529173 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529203 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529226 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529254 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j4kdl\" (UniqueName: \"kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.529304 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.530165 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.537635 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.537665 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.540185 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.545904 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.555142 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.555927 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j4kdl\" (UniqueName: \"kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl\") pod \"placement-5f55c94446-2fcrd\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.660152 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.695725 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.710679 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.834981 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835043 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835076 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle\") pod \"a9080948-c87d-49da-b53e-b5228f44a2d4\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835142 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wzx6c\" (UniqueName: \"kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c\") pod \"a9080948-c87d-49da-b53e-b5228f44a2d4\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835232 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835252 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data\") pod \"a9080948-c87d-49da-b53e-b5228f44a2d4\" (UID: \"a9080948-c87d-49da-b53e-b5228f44a2d4\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835300 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835367 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hpnfz\" (UniqueName: \"kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.835391 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys\") pod \"5dcb8a22-2a42-4baf-8b27-7041f960563c\" (UID: \"5dcb8a22-2a42-4baf-8b27-7041f960563c\") " Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.836237 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:15:46 crc kubenswrapper[5102]: E0123 07:15:46.836624 5102 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="5dcb8a22-2a42-4baf-8b27-7041f960563c" containerName="keystone-bootstrap" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.836642 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5dcb8a22-2a42-4baf-8b27-7041f960563c" containerName="keystone-bootstrap" Jan 23 07:15:46 crc kubenswrapper[5102]: E0123 07:15:46.836672 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9080948-c87d-49da-b53e-b5228f44a2d4" containerName="barbican-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.836679 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9080948-c87d-49da-b53e-b5228f44a2d4" containerName="barbican-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.836860 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="5dcb8a22-2a42-4baf-8b27-7041f960563c" containerName="keystone-bootstrap" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.836894 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9080948-c87d-49da-b53e-b5228f44a2d4" containerName="barbican-db-sync" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.837946 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.844307 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-internal-svc" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.844530 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-neutron-public-svc" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.850951 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "a9080948-c87d-49da-b53e-b5228f44a2d4" (UID: "a9080948-c87d-49da-b53e-b5228f44a2d4"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.857202 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.872497 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.877796 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz" (OuterVolumeSpecName: "kube-api-access-hpnfz") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "kube-api-access-hpnfz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.891533 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts" (OuterVolumeSpecName: "scripts") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.893160 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.898986 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c" (OuterVolumeSpecName: "kube-api-access-wzx6c") pod "a9080948-c87d-49da-b53e-b5228f44a2d4" (UID: "a9080948-c87d-49da-b53e-b5228f44a2d4"). InnerVolumeSpecName "kube-api-access-wzx6c". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.908735 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data" (OuterVolumeSpecName: "config-data") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.913418 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a9080948-c87d-49da-b53e-b5228f44a2d4" (UID: "a9080948-c87d-49da-b53e-b5228f44a2d4"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.933125 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "5dcb8a22-2a42-4baf-8b27-7041f960563c" (UID: "5dcb8a22-2a42-4baf-8b27-7041f960563c"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937106 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937159 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937184 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937226 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937282 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937299 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937321 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nxxqm\" (UniqueName: \"kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937379 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hpnfz\" (UniqueName: \"kubernetes.io/projected/5dcb8a22-2a42-4baf-8b27-7041f960563c-kube-api-access-hpnfz\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937394 5102 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937404 5102 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: 
\"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937412 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937421 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937430 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wzx6c\" (UniqueName: \"kubernetes.io/projected/a9080948-c87d-49da-b53e-b5228f44a2d4-kube-api-access-wzx6c\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937441 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937450 5102 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/a9080948-c87d-49da-b53e-b5228f44a2d4-db-sync-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:46 crc kubenswrapper[5102]: I0123 07:15:46.937458 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/5dcb8a22-2a42-4baf-8b27-7041f960563c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039458 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039504 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039527 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nxxqm\" (UniqueName: \"kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039601 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039620 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config\") pod \"neutron-86d84bb977-w99l4\" 
(UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039641 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.039674 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.043988 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.044465 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.044687 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.045678 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.046308 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.057449 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.059742 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nxxqm\" (UniqueName: \"kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm\") pod \"neutron-86d84bb977-w99l4\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.215945 5102 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openstack/barbican-db-sync-62l6f" event={"ID":"a9080948-c87d-49da-b53e-b5228f44a2d4","Type":"ContainerDied","Data":"7e674bd58ad4d26d4a091884ec29657435a4787a5f0541b031a4ca6a7f8a88c3"} Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.215988 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7e674bd58ad4d26d4a091884ec29657435a4787a5f0541b031a4ca6a7f8a88c3" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.216053 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-db-sync-62l6f" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.218136 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.221243 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-bootstrap-zqc5s" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.221746 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-bootstrap-zqc5s" event={"ID":"5dcb8a22-2a42-4baf-8b27-7041f960563c","Type":"ContainerDied","Data":"c519bc978abc2ae44d20752cfdef86f62da801ee0a5307181a31ea0f93059d4f"} Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.221812 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c519bc978abc2ae44d20752cfdef86f62da801ee0a5307181a31ea0f93059d4f" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.310956 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.312881 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.318259 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.318518 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-scripts" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.318704 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-keystone-dockercfg-f5hn2" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.318823 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-internal-svc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.319072 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-keystone-public-svc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.318833 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-config-data" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.326476 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.453086 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.453377 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"config-data\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.453451 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hd2vf\" (UniqueName: \"kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.453578 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.453842 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.454068 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.454119 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.454157 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.473979 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.482022 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.496838 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-config-data" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.497181 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-worker-config-data" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.497351 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-barbican-dockercfg-nlrnn" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.511689 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.539758 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.555754 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559352 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559439 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559484 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzvk2\" (UniqueName: \"kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559573 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559591 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559639 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hd2vf\" (UniqueName: \"kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf\") pod \"keystone-756757b6f5-klql8\" (UID: 
\"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559712 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559726 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.559747 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.560532 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.560735 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.560926 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.560983 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.571172 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-keystone-listener-config-data" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.580469 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.582825 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts\") pod \"keystone-756757b6f5-klql8\" (UID: 
\"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.586186 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.588690 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.589251 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.594497 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hd2vf\" (UniqueName: \"kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.594673 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.596513 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.606247 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys\") pod \"keystone-756757b6f5-klql8\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.631596 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.636754 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.638875 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.658617 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663454 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663515 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663583 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzvk2\" (UniqueName: \"kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663605 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663654 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.663682 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.672829 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.672969 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wvgq9\" (UniqueName: \"kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673036 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" 
(UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673102 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673259 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnmhp\" (UniqueName: \"kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673334 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673360 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673432 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673576 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673606 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.673774 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.679014 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.685793 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"]
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.701058 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.701176 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"]
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.703032 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.709226 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzvk2\" (UniqueName: \"kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2\") pod \"barbican-worker-7c6b47c6df-vm2sf\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") " pod="openstack/barbican-worker-7c6b47c6df-vm2sf"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.712034 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-679f9d9c44-mmp9r"
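
The entries above show the kubelet's three-step volume lifecycle for the incoming barbican and dnsmasq pods: reconciler_common.go:245 first verifies the volume is attached (VerifyControllerAttachedVolume), reconciler_common.go:218 then starts the mount (operationExecutor.MountVolume), and operation_generator.go:637 reports MountVolume.SetUp succeeded once the secret/configmap/projected content is materialized on disk; for barbican-worker-7c6b47c6df-vm2sf each volume completes the sequence within roughly 50 ms. A minimal Go sketch for replaying a log like this one and flagging volumes that never reach the succeeded state (the file name and output format are illustrative, not part of this job's tooling):

package main

// volstate.go: illustrative sketch, not part of this job's tooling.
// Replays a kubelet log from stdin and reports volumes that never reached
// "MountVolume.SetUp succeeded", using the three markers visible above.

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"
)

func main() {
    // Volumes are keyed by their UniqueName; quotes are backslash-escaped
    // in kubelet's structured log output, so the pattern matches \"...\".
    uniqRe := regexp.MustCompile(`UniqueName: \\"([^\\"]+)\\"`)
    state := map[string]string{}
    sc := bufio.NewScanner(os.Stdin)
    sc.Buffer(make([]byte, 1024*1024), 1024*1024) // kubelet log lines are long
    for sc.Scan() {
        line := sc.Text()
        m := uniqRe.FindStringSubmatch(line)
        if m == nil {
            continue
        }
        switch {
        case strings.Contains(line, "VerifyControllerAttachedVolume started"):
            state[m[1]] = "attached"
        case strings.Contains(line, "operationExecutor.MountVolume started"):
            state[m[1]] = "mounting"
        case strings.Contains(line, "MountVolume.SetUp succeeded"):
            state[m[1]] = "mounted"
        }
    }
    for vol, s := range state {
        if s != "mounted" {
            fmt.Printf("%-8s %s\n", s, vol) // volumes stuck before SetUp
        }
    }
}

On a complete log every healthy volume ends in the mounted state; anything still reported as attached or mounting points at a stuck mount.
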
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.726672 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-api-config-data"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.734980 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"]
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.774967 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775050 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775070 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775099 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775121 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775137 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775159 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc"
Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775180 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7qg8\" (UniqueName: \"kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8\") pod
\"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775205 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775240 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wvgq9\" (UniqueName: \"kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775259 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775279 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775297 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775327 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnmhp\" (UniqueName: \"kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775348 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.775362 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.776285 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: 
\"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.777328 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.777664 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.778089 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.778797 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.779861 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.791629 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.794047 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.802532 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.847666 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnmhp\" (UniqueName: 
\"kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp\") pod \"barbican-keystone-listener-58b7895cd-h85fc\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") " pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.866370 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.882791 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.882850 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.882885 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7qg8\" (UniqueName: \"kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.882982 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.883011 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.887388 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.890316 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.892422 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wvgq9\" (UniqueName: \"kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9\") pod \"dnsmasq-dns-66cdd4b5b5-tpx4p\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:47 crc 
kubenswrapper[5102]: I0123 07:15:47.894796 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.896319 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.904741 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.919799 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.947218 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.947665 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7qg8\" (UniqueName: \"kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8\") pod \"barbican-api-679f9d9c44-mmp9r\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") " pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.981635 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"] Jan 23 07:15:47 crc kubenswrapper[5102]: I0123 07:15:47.984736 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.047644 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"] Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087519 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087598 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087651 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9rnvx\" (UniqueName: \"kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087687 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087737 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087781 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087830 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087855 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ssvr6\" (UniqueName: \"kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: 
\"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.087966 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.088009 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.101432 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.118762 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.146253 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.155286 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.157055 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.185803 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.189893 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.189939 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190028 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190049 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190071 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9rnvx\" (UniqueName: \"kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190101 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190122 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190157 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 
07:15:48.190190 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.190211 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ssvr6\" (UniqueName: \"kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.197043 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.209023 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.210083 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.211616 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.223056 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.223683 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ssvr6\" (UniqueName: \"kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.234662 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 
07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.237841 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.238799 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data\") pod \"barbican-worker-5f48c766d5-kqw8p\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " pod="openstack/barbican-worker-5f48c766d5-kqw8p"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.240819 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-685444497c-xk8fn" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="dnsmasq-dns" containerID="cri-o://d0c2c8a828bd6e93400e9785c674f6ed4635c1d0b13a9171d774dd815910659a" gracePeriod=10
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.241351 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9rnvx\" (UniqueName: \"kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx\") pod \"barbican-keystone-listener-57649777bb-wl6hv\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " pod="openstack/barbican-keystone-listener-57649777bb-wl6hv"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.258999 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.291879 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kf5km\" (UniqueName: \"kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.292129 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.292248 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.292401 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.292510 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
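
Alongside the mounts for the replacement pods, the kubelet begins tearing down the superseded DNS pod: kuberuntime_container.go:808 sends the dnsmasq-dns container a SIGTERM with gracePeriod=10 (seconds), and the PLEG "container finished" entry further down shows the container exiting with code 0 just over a second later, comfortably inside the grace window. A small Go sketch (again illustrative, not part of this job's tooling) that pairs each kill with its "container finished" event and reports how much of the grace period was actually used:

package main

// killlatency.go: illustrative sketch, not part of this job's tooling.
// Pairs "Killing container with a grace period" entries with the matching
// PLEG "container finished" entries and prints how long each container
// took to exit after SIGTERM.

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"
    "time"
)

var (
    tsRe  = regexp.MustCompile(`[IWE]\d{4} (\d{2}:\d{2}:\d{2}\.\d{6})`)
    cidRe = regexp.MustCompile(`containerID="(?:cri-o://)?([0-9a-f]+)"`)
)

func main() {
    killedAt := map[string]time.Time{}
    sc := bufio.NewScanner(os.Stdin)
    sc.Buffer(make([]byte, 1024*1024), 1024*1024)
    for sc.Scan() {
        line := sc.Text()
        cid := cidRe.FindStringSubmatch(line)
        ts := tsRe.FindStringSubmatch(line)
        if cid == nil || ts == nil {
            continue
        }
        // Timestamps carry no date; fine for pairing within one log window.
        t, err := time.Parse("15:04:05.000000", ts[1])
        if err != nil {
            continue
        }
        switch {
        case strings.Contains(line, "Killing container with a grace period"):
            killedAt[cid[1]] = t
        case strings.Contains(line, "container finished"):
            if t0, ok := killedAt[cid[1]]; ok {
                fmt.Printf("%s exited %v after SIGTERM\n", cid[1][:12], t.Sub(t0))
            }
        }
    }
}

For containerID d0c2c8a828bd... this excerpt yields roughly 1.02s (07:15:48.240819 to 07:15:49.256235), so the 10-second grace period is barely touched.
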
\"config-data-custom\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.395001 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.395067 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.395144 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kf5km\" (UniqueName: \"kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.395236 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.395282 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.396160 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.400729 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.403031 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.405953 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.415628 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kf5km\" (UniqueName: \"kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km\") pod \"barbican-api-5f4bb86bfb-vm5xl\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.425661 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:15:48 crc kubenswrapper[5102]: I0123 07:15:48.483579 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.256235 5102 generic.go:334] "Generic (PLEG): container finished" podID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerID="d0c2c8a828bd6e93400e9785c674f6ed4635c1d0b13a9171d774dd815910659a" exitCode=0 Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.256316 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-xk8fn" event={"ID":"dff27ff3-187a-4e1d-9505-d02ab69bc244","Type":"ContainerDied","Data":"d0c2c8a828bd6e93400e9785c674f6ed4635c1d0b13a9171d774dd815910659a"} Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.588519 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.588923 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.637934 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:49 crc kubenswrapper[5102]: I0123 07:15:49.648650 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.077524 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-xk8fn" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.154707 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.154823 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.154892 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.154951 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.154975 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.155003 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zqntl\" (UniqueName: \"kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl\") pod \"dff27ff3-187a-4e1d-9505-d02ab69bc244\" (UID: \"dff27ff3-187a-4e1d-9505-d02ab69bc244\") " Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.175708 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl" (OuterVolumeSpecName: "kube-api-access-zqntl") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "kube-api-access-zqntl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.257605 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zqntl\" (UniqueName: \"kubernetes.io/projected/dff27ff3-187a-4e1d-9505-d02ab69bc244-kube-api-access-zqntl\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.291996 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-685444497c-xk8fn" event={"ID":"dff27ff3-187a-4e1d-9505-d02ab69bc244","Type":"ContainerDied","Data":"877157e21384eb2dd0ef1ed03882e1f4708bcd1071f40580f39d6d16e95bae14"} Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.292079 5102 scope.go:117] "RemoveContainer" containerID="d0c2c8a828bd6e93400e9785c674f6ed4635c1d0b13a9171d774dd815910659a" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.292487 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-685444497c-xk8fn"
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.293011 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.293074 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.344864 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.345915 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.356046 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config" (OuterVolumeSpecName: "config") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.360132 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-nb\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.360174 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-ovsdbserver-sb\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.360188 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.369738 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.375715 5102 scope.go:117] "RemoveContainer" containerID="4a4473f4f723a6ec8f3a04c25492e0febaa16f4a70829cb4f282a8c866dcb8cb"
Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.386009 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "dff27ff3-187a-4e1d-9505-d02ab69bc244" (UID: "dff27ff3-187a-4e1d-9505-d02ab69bc244"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue ""
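
With the container dead, teardown of dnsmasq-dns-685444497c-xk8fn proceeds volume by volume: operation_generator.go:803 logs UnmountVolume.TearDown succeeded for each configmap volume, reconciler_common.go:293 confirms the volume is detached from node crc, and once the API object is deleted (SyncLoop DELETE/REMOVE below) the kubelet removes the orphaned /var/lib/kubelet/pods/<uid>/volumes directory. A Go sketch (illustrative, not this job's tooling) that cross-checks the unmount/detach pairing in a log like this:

package main

// teardowncheck.go: illustrative sketch, not part of this job's tooling.
// Verifies that every "UnmountVolume started" in a kubelet log is followed
// by a matching "Volume detached" confirmation. Volumes are keyed by their
// short name, which is sufficient for a single pod's teardown.

import (
    "bufio"
    "fmt"
    "os"
    "regexp"
    "strings"
)

var (
    unmountRe = regexp.MustCompile(`UnmountVolume started for volume \\"([^\\"]+)\\"`)
    detachRe  = regexp.MustCompile(`Volume detached for volume \\"([^\\"]+)\\"`)
)

func main() {
    pending := map[string]bool{}
    sc := bufio.NewScanner(os.Stdin)
    sc.Buffer(make([]byte, 1024*1024), 1024*1024)
    for sc.Scan() {
        line := sc.Text()
        if m := unmountRe.FindStringSubmatch(line); m != nil {
            pending[m[1]] = true
        } else if m := detachRe.FindStringSubmatch(line); m != nil {
            delete(pending, m[1])
        } else if strings.Contains(line, "Cleaned up orphaned pod volumes dir") {
            // Anything still pending at cleanup time never confirmed detach.
            for v := range pending {
                fmt.Println("unmounted but never detached:", v)
            }
            pending = map[string]bool{}
        }
    }
}

In this excerpt all six dff27ff3-... volumes (config, dns-svc, dns-swift-storage-0, ovsdbserver-nb, ovsdbserver-sb, kube-api-access-zqntl) are unmounted and confirmed detached before the "Cleaned up orphaned pod volumes dir" entry, so the check prints nothing.
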
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.464232 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.464697 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/dff27ff3-187a-4e1d-9505-d02ab69bc244-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.658800 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:50 crc kubenswrapper[5102]: I0123 07:15:50.674934 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-685444497c-xk8fn"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.085672 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:15:51 crc kubenswrapper[5102]: W0123 07:15:51.100887 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5faa20_dfb4_4dc5_b4e0_21d0fef34cc1.slice/crio-2bbe42b18e5260d1dd5c274d62434ecba80f4a25db0dfd21e62f8aab0f911e30 WatchSource:0}: Error finding container 2bbe42b18e5260d1dd5c274d62434ecba80f4a25db0dfd21e62f8aab0f911e30: Status 404 returned error can't find the container with id 2bbe42b18e5260d1dd5c274d62434ecba80f4a25db0dfd21e62f8aab0f911e30 Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.118068 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.199751 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.229912 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.292703 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.324957 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerStarted","Data":"336f61f7256e8da64e4504e408d7f28b4158e344d35dc10ca4cbc92c0b71073e"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.344014 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.344086 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.354798 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerStarted","Data":"d7896ca35364a829e3c667d39afe7ff569f912adde7776d51eabab5d24a1a9fc"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.364588 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" 
event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerStarted","Data":"b8e032662c2c6bb9e5c6da19ed0766ccd9dec60457653b713643ec248a36621b"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.383723 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-756757b6f5-klql8" event={"ID":"8dfe2011-cf9e-413e-b53a-c7ff73f81161","Type":"ContainerStarted","Data":"78f848b352e4057fe24fef652e86c0afbc064cf60ecb60cf471919c13cf57af0"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.389481 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.406647 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerStarted","Data":"b7288f5f6274dd79e1a3005878fef3cfc5f379444935eb7909b3c44c8a67ca49"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.414364 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerStarted","Data":"2bbe42b18e5260d1dd5c274d62434ecba80f4a25db0dfd21e62f8aab0f911e30"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.425697 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.455500 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.471328 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerStarted","Data":"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef"} Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.480000 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.480937 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.525916 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.530914 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.543599 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.554379 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"] Jan 23 07:15:51 crc kubenswrapper[5102]: E0123 07:15:51.555045 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="dnsmasq-dns" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.555068 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="dnsmasq-dns" Jan 23 07:15:51 crc kubenswrapper[5102]: E0123 07:15:51.555100 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="init" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.555108 
5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="init" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.555375 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" containerName="dnsmasq-dns" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.556630 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.560017 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-public-svc" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.560504 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-barbican-internal-svc" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.562660 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"] Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.626815 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dff27ff3-187a-4e1d-9505-d02ab69bc244" path="/var/lib/kubelet/pods/dff27ff3-187a-4e1d-9505-d02ab69bc244/volumes" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.705371 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.705904 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.705957 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.705984 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.706072 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bvzfj\" (UniqueName: \"kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.706117 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: 
\"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.706151 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.809614 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.809774 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.809825 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.809878 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.809914 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.810056 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bvzfj\" (UniqueName: \"kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.810137 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.818160 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs\") 
pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.819041 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.820807 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.827147 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.827345 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.827569 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.831523 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bvzfj\" (UniqueName: \"kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj\") pod \"barbican-api-f86b8db9b-zlplv\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:51 crc kubenswrapper[5102]: I0123 07:15:51.892732 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.467526 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"]
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.486350 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-756757b6f5-klql8" event={"ID":"8dfe2011-cf9e-413e-b53a-c7ff73f81161","Type":"ContainerStarted","Data":"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.487135 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/keystone-756757b6f5-klql8"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.491697 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerStarted","Data":"04a75c697ea1aac00dffbc51b878b9c90262d7c394882f3e8e4fead3dde40397"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.508788 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/keystone-756757b6f5-klql8" podStartSLOduration=5.508764149 podStartE2EDuration="5.508764149s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:52.503416213 +0000 UTC m=+1303.323765188" watchObservedRunningTime="2026-01-23 07:15:52.508764149 +0000 UTC m=+1303.329113124"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.517198 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerStarted","Data":"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.517270 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerStarted","Data":"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.517471 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-679f9d9c44-mmp9r" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api-log" containerID="cri-o://fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2" gracePeriod=30
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.517905 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-679f9d9c44-mmp9r"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.517956 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-679f9d9c44-mmp9r"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.518310 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-679f9d9c44-mmp9r" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api" containerID="cri-o://13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f" gracePeriod=30
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.530670 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerStarted","Data":"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.530724 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerStarted","Data":"ceba6e70548e27344439cf65f6e7c6810a9d1fb46e75b64e482ab07843570224"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.562275 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerStarted","Data":"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.562350 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerStarted","Data":"3162d1960db8510e71507894e2b487bff1c1f991fe81eaad085ec3425af1df9c"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.589058 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerStarted","Data":"cd4bc9de53c6d3ebbca38bdbd32be0043c5c807fc468bf8946363360d6ae1874"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.609615 5102 generic.go:334] "Generic (PLEG): container finished" podID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerID="418ec834bc43dcd3b6f8f4ac43e21ff0ae20f8aa8627023ec70b7e6a7eafe633" exitCode=0
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.612024 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" event={"ID":"1c19dfac-bb3d-49f4-9296-5785c0e30ef7","Type":"ContainerDied","Data":"418ec834bc43dcd3b6f8f4ac43e21ff0ae20f8aa8627023ec70b7e6a7eafe633"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.612088 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.612102 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0"
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.612111 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" event={"ID":"1c19dfac-bb3d-49f4-9296-5785c0e30ef7","Type":"ContainerStarted","Data":"065fb01ed0695b220e02202d91a02ad5783736d08c889d9a1d2d61bcaa239229"}
Jan 23 07:15:52 crc kubenswrapper[5102]: I0123 07:15:52.656483 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-679f9d9c44-mmp9r" podStartSLOduration=5.656463485 podStartE2EDuration="5.656463485s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:52.553862593 +0000 UTC m=+1303.374211568" watchObservedRunningTime="2026-01-23 07:15:52.656463485 +0000 UTC m=+1303.476812460"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.532926 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.533433 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.682371 5102 generic.go:334] "Generic (PLEG): container finished" podID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerID="fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2" exitCode=143
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.682483 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerDied","Data":"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"}
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.690314 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerStarted","Data":"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1"}
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.690423 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.690450 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-5f4bb86bfb-vm5xl"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.706269 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerStarted","Data":"d62cf3f61ec961d54c0543a7c6db6538a2fa229a7aa3236626738a9910298f8a"}
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.725226 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podStartSLOduration=5.725200343 podStartE2EDuration="5.725200343s" podCreationTimestamp="2026-01-23 07:15:48 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:53.72062252 +0000 UTC m=+1304.540971495" watchObservedRunningTime="2026-01-23 07:15:53.725200343 +0000 UTC m=+1304.545549318"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.735628 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 23 07:15:53 crc kubenswrapper[5102]: I0123 07:15:53.756146 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/placement-5f55c94446-2fcrd" podStartSLOduration=7.756122045 podStartE2EDuration="7.756122045s" podCreationTimestamp="2026-01-23 07:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:53.744190634 +0000 UTC m=+1304.564539609" watchObservedRunningTime="2026-01-23 07:15:53.756122045 +0000 UTC m=+1304.576471010"
Jan 23 07:15:54 crc kubenswrapper[5102]: I0123 07:15:54.714769 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 07:15:54 crc kubenswrapper[5102]: I0123 07:15:54.715029 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 07:15:54 crc kubenswrapper[5102]: I0123 07:15:54.714869 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5f55c94446-2fcrd"
Jan 23 07:15:54 crc kubenswrapper[5102]: I0123 07:15:54.715137 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/placement-5f55c94446-2fcrd"
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.699295 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.738016 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerStarted","Data":"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.738259 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-86d84bb977-w99l4"
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.743317 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerStarted","Data":"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.744778 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerStarted","Data":"e67f52221dfd73d87e1424d36597ef15ba815b679b3c3a42a426b09b6d7202ba"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.747733 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerStarted","Data":"1c2b7872a2545b66a57c882256af7217337d1fd8dff89dcd483e2cb0e43f8857"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.768566 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerStarted","Data":"6070b8a73f605944bd3a15d8db62c51a5d73dffd518f94ec4f6ed403ec5ef669"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.781780 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerStarted","Data":"34cd1354e2d8d1a8790a5b6bcf27425452892c21859624035093db53b7f4bf45"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.781836 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerStarted","Data":"f0eb75d7f57a59cc28eabe688663cc93fbca89d4f9b8db35dbd7d41944090f4d"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.786999 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.789081 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" event={"ID":"1c19dfac-bb3d-49f4-9296-5785c0e30ef7","Type":"ContainerStarted","Data":"374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b"}
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.789718 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"
Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.828298 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" podStartSLOduration=8.828266935 podStartE2EDuration="8.828266935s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:55.81781758 +0000 UTC m=+1306.638166575" watchObservedRunningTime="2026-01-23 07:15:55.828266935 +0000 UTC m=+1306.648615910"
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:55.81781758 +0000 UTC m=+1306.638166575" watchObservedRunningTime="2026-01-23 07:15:55.828266935 +0000 UTC m=+1306.648615910" Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.831179 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-86d84bb977-w99l4" podStartSLOduration=9.831163926 podStartE2EDuration="9.831163926s" podCreationTimestamp="2026-01-23 07:15:46 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:55.767794974 +0000 UTC m=+1306.588143949" watchObservedRunningTime="2026-01-23 07:15:55.831163926 +0000 UTC m=+1306.651512901" Jan 23 07:15:55 crc kubenswrapper[5102]: I0123 07:15:55.835300 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.801597 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c2dfg" event={"ID":"ad1791e1-86ab-44e5-99e9-399e93cffc68","Type":"ContainerStarted","Data":"b5931e20a6ae974a9df4e142bed61f7199f857b21d6f031cee275cb71eed9329"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.812204 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerStarted","Data":"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.818038 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerStarted","Data":"7e93161384bff9dc096aa436e0fd11405c0eaa0866d1c12e9a8ccebe22e56097"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.828928 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerStarted","Data":"5d6015bcce119402ae68512013660f0b377ef051da45613130cfb7bef505abde"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.832482 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerStarted","Data":"d878afc3004c8f5f3e7c7a5b43603e184202681b56d93e9496c6bc1f56835ac3"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.835322 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-db-sync-c2dfg" podStartSLOduration=4.085749034 podStartE2EDuration="41.835298113s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="2026-01-23 07:15:17.423622801 +0000 UTC m=+1268.243971776" lastFinishedPulling="2026-01-23 07:15:55.17317188 +0000 UTC m=+1305.993520855" observedRunningTime="2026-01-23 07:15:56.835133498 +0000 UTC m=+1307.655482473" watchObservedRunningTime="2026-01-23 07:15:56.835298113 +0000 UTC m=+1307.655647078" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.837733 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerStarted","Data":"da77c21b9df506f687f044080259f2d216b0315a1410a1b0676e52084c699b33"} Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 
07:15:56.837777 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.838679 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.858730 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" podStartSLOduration=5.774502229 podStartE2EDuration="9.858709771s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="2026-01-23 07:15:51.088286968 +0000 UTC m=+1301.908635943" lastFinishedPulling="2026-01-23 07:15:55.17249451 +0000 UTC m=+1305.992843485" observedRunningTime="2026-01-23 07:15:56.85289548 +0000 UTC m=+1307.673244455" watchObservedRunningTime="2026-01-23 07:15:56.858709771 +0000 UTC m=+1307.679058746" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.885583 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-5f48c766d5-kqw8p" podStartSLOduration=6.166741015 podStartE2EDuration="9.885555947s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="2026-01-23 07:15:51.456194706 +0000 UTC m=+1302.276543671" lastFinishedPulling="2026-01-23 07:15:55.175009628 +0000 UTC m=+1305.995358603" observedRunningTime="2026-01-23 07:15:56.875742871 +0000 UTC m=+1307.696091846" watchObservedRunningTime="2026-01-23 07:15:56.885555947 +0000 UTC m=+1307.705904922" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.912705 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" podStartSLOduration=6.046507723 podStartE2EDuration="9.91267711s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="2026-01-23 07:15:51.309096329 +0000 UTC m=+1302.129445304" lastFinishedPulling="2026-01-23 07:15:55.175265716 +0000 UTC m=+1305.995614691" observedRunningTime="2026-01-23 07:15:56.905397504 +0000 UTC m=+1307.725746479" watchObservedRunningTime="2026-01-23 07:15:56.91267711 +0000 UTC m=+1307.733026085" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.950599 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.967572 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" podStartSLOduration=6.052493559 podStartE2EDuration="9.967533807s" podCreationTimestamp="2026-01-23 07:15:47 +0000 UTC" firstStartedPulling="2026-01-23 07:15:51.257560435 +0000 UTC m=+1302.077909410" lastFinishedPulling="2026-01-23 07:15:55.172600683 +0000 UTC m=+1305.992949658" observedRunningTime="2026-01-23 07:15:56.934245391 +0000 UTC m=+1307.754594376" watchObservedRunningTime="2026-01-23 07:15:56.967533807 +0000 UTC m=+1307.787882782" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.971854 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/barbican-api-f86b8db9b-zlplv" podStartSLOduration=5.971834691 podStartE2EDuration="5.971834691s" podCreationTimestamp="2026-01-23 07:15:51 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:15:56.957899128 +0000 UTC m=+1307.778248103" watchObservedRunningTime="2026-01-23 07:15:56.971834691 
+0000 UTC m=+1307.792183666" Jan 23 07:15:56 crc kubenswrapper[5102]: I0123 07:15:56.991736 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:15:58 crc kubenswrapper[5102]: I0123 07:15:58.859239 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener-log" containerID="cri-o://1c2b7872a2545b66a57c882256af7217337d1fd8dff89dcd483e2cb0e43f8857" gracePeriod=30 Jan 23 07:15:58 crc kubenswrapper[5102]: I0123 07:15:58.859299 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener" containerID="cri-o://5d6015bcce119402ae68512013660f0b377ef051da45613130cfb7bef505abde" gracePeriod=30 Jan 23 07:15:58 crc kubenswrapper[5102]: I0123 07:15:58.859351 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker-log" containerID="cri-o://e67f52221dfd73d87e1424d36597ef15ba815b679b3c3a42a426b09b6d7202ba" gracePeriod=30 Jan 23 07:15:58 crc kubenswrapper[5102]: I0123 07:15:58.859439 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker" containerID="cri-o://7e93161384bff9dc096aa436e0fd11405c0eaa0866d1c12e9a8ccebe22e56097" gracePeriod=30 Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.812882 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.874135 5102 generic.go:334] "Generic (PLEG): container finished" podID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerID="7e93161384bff9dc096aa436e0fd11405c0eaa0866d1c12e9a8ccebe22e56097" exitCode=0 Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.875062 5102 generic.go:334] "Generic (PLEG): container finished" podID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerID="e67f52221dfd73d87e1424d36597ef15ba815b679b3c3a42a426b09b6d7202ba" exitCode=143 Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.874328 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerDied","Data":"7e93161384bff9dc096aa436e0fd11405c0eaa0866d1c12e9a8ccebe22e56097"} Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.875283 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerDied","Data":"e67f52221dfd73d87e1424d36597ef15ba815b679b3c3a42a426b09b6d7202ba"} Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.881374 5102 generic.go:334] "Generic (PLEG): container finished" podID="d0544141-4b33-4876-9946-575472f62d80" containerID="5d6015bcce119402ae68512013660f0b377ef051da45613130cfb7bef505abde" exitCode=0 Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.881470 5102 generic.go:334] "Generic (PLEG): container finished" podID="d0544141-4b33-4876-9946-575472f62d80" containerID="1c2b7872a2545b66a57c882256af7217337d1fd8dff89dcd483e2cb0e43f8857" 
exitCode=143 Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.881457 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerDied","Data":"5d6015bcce119402ae68512013660f0b377ef051da45613130cfb7bef505abde"} Jan 23 07:15:59 crc kubenswrapper[5102]: I0123 07:15:59.881677 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerDied","Data":"1c2b7872a2545b66a57c882256af7217337d1fd8dff89dcd483e2cb0e43f8857"} Jan 23 07:16:00 crc kubenswrapper[5102]: I0123 07:16:00.035035 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:16:00 crc kubenswrapper[5102]: I0123 07:16:00.048166 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-679f9d9c44-mmp9r" Jan 23 07:16:00 crc kubenswrapper[5102]: I0123 07:16:00.088823 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:16:01 crc kubenswrapper[5102]: I0123 07:16:01.907303 5102 generic.go:334] "Generic (PLEG): container finished" podID="ad1791e1-86ab-44e5-99e9-399e93cffc68" containerID="b5931e20a6ae974a9df4e142bed61f7199f857b21d6f031cee275cb71eed9329" exitCode=0 Jan 23 07:16:01 crc kubenswrapper[5102]: I0123 07:16:01.907656 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c2dfg" event={"ID":"ad1791e1-86ab-44e5-99e9-399e93cffc68","Type":"ContainerDied","Data":"b5931e20a6ae974a9df4e142bed61f7199f857b21d6f031cee275cb71eed9329"} Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.123621 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.205727 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.205983 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="dnsmasq-dns" containerID="cri-o://d0d9ee8920f578f474cc5802e280483068b7dd4c912f6141ece58d9c34aba17d" gracePeriod=10 Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.364198 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.511288 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.587859 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.588161 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api-log" containerID="cri-o://522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20" gracePeriod=30 Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.589020 5102 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" containerID="cri-o://655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1" gracePeriod=30 Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.598839 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": EOF" Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.935113 5102 generic.go:334] "Generic (PLEG): container finished" podID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerID="d0d9ee8920f578f474cc5802e280483068b7dd4c912f6141ece58d9c34aba17d" exitCode=0 Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.935190 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" event={"ID":"0d6a2bb1-58af-4994-81ff-15562e1f9a4f","Type":"ContainerDied","Data":"d0d9ee8920f578f474cc5802e280483068b7dd4c912f6141ece58d9c34aba17d"} Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.944564 5102 generic.go:334] "Generic (PLEG): container finished" podID="253392be-f480-4837-b1c2-4d92fe442c09" containerID="522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20" exitCode=143 Jan 23 07:16:03 crc kubenswrapper[5102]: I0123 07:16:03.946152 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerDied","Data":"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20"} Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.164789 5102 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265296 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265394 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f67t9\" (UniqueName: \"kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265465 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265567 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265607 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.265690 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id\") pod \"ad1791e1-86ab-44e5-99e9-399e93cffc68\" (UID: \"ad1791e1-86ab-44e5-99e9-399e93cffc68\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.266193 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.274558 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts" (OuterVolumeSpecName: "scripts") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.286010 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9" (OuterVolumeSpecName: "kube-api-access-f67t9") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "kube-api-access-f67t9". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.287660 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data" (OuterVolumeSpecName: "db-sync-config-data") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "db-sync-config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.338892 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.362552 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data" (OuterVolumeSpecName: "config-data") pod "ad1791e1-86ab-44e5-99e9-399e93cffc68" (UID: "ad1791e1-86ab-44e5-99e9-399e93cffc68"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.365232 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7c6b47c6df-vm2sf"
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369065 5102 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/ad1791e1-86ab-44e5-99e9-399e93cffc68-etc-machine-id\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369090 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369100 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f67t9\" (UniqueName: \"kubernetes.io/projected/ad1791e1-86ab-44e5-99e9-399e93cffc68-kube-api-access-f67t9\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369108 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369118 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.369125 5102 reconciler_common.go:293] "Volume detached for volume \"db-sync-config-data\" (UniqueName: \"kubernetes.io/secret/ad1791e1-86ab-44e5-99e9-399e93cffc68-db-sync-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.470523 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data\") pod \"d8d80be7-3115-4f2c-81aa-b906b26f339e\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.470621 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom\") pod \"d8d80be7-3115-4f2c-81aa-b906b26f339e\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.470770 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tzvk2\" (UniqueName: \"kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2\") pod \"d8d80be7-3115-4f2c-81aa-b906b26f339e\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.470795 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs\") pod \"d8d80be7-3115-4f2c-81aa-b906b26f339e\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.470833 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle\") pod \"d8d80be7-3115-4f2c-81aa-b906b26f339e\" (UID: \"d8d80be7-3115-4f2c-81aa-b906b26f339e\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.477013 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2" (OuterVolumeSpecName: "kube-api-access-tzvk2") pod "d8d80be7-3115-4f2c-81aa-b906b26f339e" (UID: "d8d80be7-3115-4f2c-81aa-b906b26f339e"). InnerVolumeSpecName "kube-api-access-tzvk2". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.477379 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs" (OuterVolumeSpecName: "logs") pod "d8d80be7-3115-4f2c-81aa-b906b26f339e" (UID: "d8d80be7-3115-4f2c-81aa-b906b26f339e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.477510 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk"
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.485654 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d8d80be7-3115-4f2c-81aa-b906b26f339e" (UID: "d8d80be7-3115-4f2c-81aa-b906b26f339e"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.491029 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc"
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.535772 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d8d80be7-3115-4f2c-81aa-b906b26f339e" (UID: "d8d80be7-3115-4f2c-81aa-b906b26f339e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575233 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs\") pod \"d0544141-4b33-4876-9946-575472f62d80\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575323 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575358 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575399 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle\") pod \"d0544141-4b33-4876-9946-575472f62d80\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575418 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data\") pod \"d0544141-4b33-4876-9946-575472f62d80\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575450 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom\") pod \"d0544141-4b33-4876-9946-575472f62d80\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575473 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575503 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575592 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575610 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2psvx\" (UniqueName: \"kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx\") pod \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\" (UID: \"0d6a2bb1-58af-4994-81ff-15562e1f9a4f\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.575677 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnmhp\" (UniqueName: \"kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp\") pod \"d0544141-4b33-4876-9946-575472f62d80\" (UID: \"d0544141-4b33-4876-9946-575472f62d80\") "
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.576057 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tzvk2\" (UniqueName: \"kubernetes.io/projected/d8d80be7-3115-4f2c-81aa-b906b26f339e-kube-api-access-tzvk2\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.576084 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d8d80be7-3115-4f2c-81aa-b906b26f339e-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.576099 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.576109 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.576513 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs" (OuterVolumeSpecName: "logs") pod "d0544141-4b33-4876-9946-575472f62d80" (UID: "d0544141-4b33-4876-9946-575472f62d80"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.579650 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp" (OuterVolumeSpecName: "kube-api-access-gnmhp") pod "d0544141-4b33-4876-9946-575472f62d80" (UID: "d0544141-4b33-4876-9946-575472f62d80"). InnerVolumeSpecName "kube-api-access-gnmhp". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.605257 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data" (OuterVolumeSpecName: "config-data") pod "d8d80be7-3115-4f2c-81aa-b906b26f339e" (UID: "d8d80be7-3115-4f2c-81aa-b906b26f339e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.616525 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "d0544141-4b33-4876-9946-575472f62d80" (UID: "d0544141-4b33-4876-9946-575472f62d80"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.620743 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx" (OuterVolumeSpecName: "kube-api-access-2psvx") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "kube-api-access-2psvx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.637622 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0544141-4b33-4876-9946-575472f62d80" (UID: "d0544141-4b33-4876-9946-575472f62d80"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679019 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d0544141-4b33-4876-9946-575472f62d80-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679048 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679057 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679065 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2psvx\" (UniqueName: \"kubernetes.io/projected/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-kube-api-access-2psvx\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679075 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d8d80be7-3115-4f2c-81aa-b906b26f339e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.679084 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnmhp\" (UniqueName: \"kubernetes.io/projected/d0544141-4b33-4876-9946-575472f62d80-kube-api-access-gnmhp\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.693914 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.695919 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config" (OuterVolumeSpecName: "config") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.709117 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.720741 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data" (OuterVolumeSpecName: "config-data") pod "d0544141-4b33-4876-9946-575472f62d80" (UID: "d0544141-4b33-4876-9946-575472f62d80"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.721254 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.727198 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "0d6a2bb1-58af-4994-81ff-15562e1f9a4f" (UID: "0d6a2bb1-58af-4994-81ff-15562e1f9a4f"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781337 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781383 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781395 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781411 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d0544141-4b33-4876-9946-575472f62d80-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781423 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.781434 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/0d6a2bb1-58af-4994-81ff-15562e1f9a4f-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.955900 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" 
event={"ID":"0d6a2bb1-58af-4994-81ff-15562e1f9a4f","Type":"ContainerDied","Data":"b825d9747ead5a361a85d9a78b2effb435535a5499cfa8dfaab0cc79f85dc971"} Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.955963 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-6f6f8cb849-5d7pk" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.956004 5102 scope.go:117] "RemoveContainer" containerID="d0d9ee8920f578f474cc5802e280483068b7dd4c912f6141ece58d9c34aba17d" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.961157 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" event={"ID":"d0544141-4b33-4876-9946-575472f62d80","Type":"ContainerDied","Data":"b8e032662c2c6bb9e5c6da19ed0766ccd9dec60457653b713643ec248a36621b"} Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.961254 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-58b7895cd-h85fc" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.975595 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-db-sync-c2dfg" event={"ID":"ad1791e1-86ab-44e5-99e9-399e93cffc68","Type":"ContainerDied","Data":"aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb"} Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.975642 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="aeb738f5f5811ebe874061614093cb98c94373eca748b6fd768610a66f199fbb" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.975825 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-db-sync-c2dfg" Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.999031 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerStarted","Data":"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5"} Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.999177 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-central-agent" containerID="cri-o://2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a" gracePeriod=30 Jan 23 07:16:04 crc kubenswrapper[5102]: I0123 07:16:04.999235 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:04.999336 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="proxy-httpd" containerID="cri-o://e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5" gracePeriod=30 Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:04.999351 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="sg-core" containerID="cri-o://127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef" gracePeriod=30 Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:04.999398 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-notification-agent" 
containerID="cri-o://341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b" gracePeriod=30 Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.017418 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" event={"ID":"d8d80be7-3115-4f2c-81aa-b906b26f339e","Type":"ContainerDied","Data":"d7896ca35364a829e3c667d39afe7ff569f912adde7776d51eabab5d24a1a9fc"} Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.017652 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-7c6b47c6df-vm2sf" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.051038 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.251718679 podStartE2EDuration="50.051001358s" podCreationTimestamp="2026-01-23 07:15:15 +0000 UTC" firstStartedPulling="2026-01-23 07:15:17.239035107 +0000 UTC m=+1268.059384082" lastFinishedPulling="2026-01-23 07:16:04.038317786 +0000 UTC m=+1314.858666761" observedRunningTime="2026-01-23 07:16:05.032194513 +0000 UTC m=+1315.852543508" watchObservedRunningTime="2026-01-23 07:16:05.051001358 +0000 UTC m=+1315.871350333" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.064147 5102 scope.go:117] "RemoveContainer" containerID="14575420a64edb55b314ff4ac6ea25e54273a0c20daa2c997107bf81eb7926fc" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.071765 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.087294 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-58b7895cd-h85fc"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.097582 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.103351 5102 scope.go:117] "RemoveContainer" containerID="5d6015bcce119402ae68512013660f0b377ef051da45613130cfb7bef505abde" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.109448 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-6f6f8cb849-5d7pk"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.124134 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.136812 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-7c6b47c6df-vm2sf"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.177732 5102 scope.go:117] "RemoveContainer" containerID="1c2b7872a2545b66a57c882256af7217337d1fd8dff89dcd483e2cb0e43f8857" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.204701 5102 scope.go:117] "RemoveContainer" containerID="7e93161384bff9dc096aa436e0fd11405c0eaa0866d1c12e9a8ccebe22e56097" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.236608 5102 scope.go:117] "RemoveContainer" containerID="e67f52221dfd73d87e1424d36597ef15ba815b679b3c3a42a426b09b6d7202ba" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.488000 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491418 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="dnsmasq-dns" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491442 
5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="dnsmasq-dns" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491454 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker-log" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491464 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker-log" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491482 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="init" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491487 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="init" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491495 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener-log" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491502 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener-log" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491522 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491529 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491551 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" containerName="cinder-db-sync" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491557 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" containerName="cinder-db-sync" Jan 23 07:16:05 crc kubenswrapper[5102]: E0123 07:16:05.491569 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491575 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491741 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" containerName="dnsmasq-dns" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491754 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" containerName="barbican-worker-log" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491771 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" containerName="cinder-db-sync" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491780 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener-log" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491790 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" 
containerName="barbican-worker" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.491802 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0544141-4b33-4876-9946-575472f62d80" containerName="barbican-keystone-listener" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.492762 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.498279 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-cinder-dockercfg-qkq76" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.498344 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-config-data" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.498607 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.498748 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scripts" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.507001 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624063 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624159 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624297 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624376 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j96lf\" (UniqueName: \"kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624425 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.624445 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " 
pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.631998 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d6a2bb1-58af-4994-81ff-15562e1f9a4f" path="/var/lib/kubelet/pods/0d6a2bb1-58af-4994-81ff-15562e1f9a4f/volumes" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.633500 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0544141-4b33-4876-9946-575472f62d80" path="/var/lib/kubelet/pods/d0544141-4b33-4876-9946-575472f62d80/volumes" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.634274 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d8d80be7-3115-4f2c-81aa-b906b26f339e" path="/var/lib/kubelet/pods/d8d80be7-3115-4f2c-81aa-b906b26f339e/volumes" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.636739 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.641608 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.648577 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.716160 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.719391 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.725472 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.731677 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.732988 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733026 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j96lf\" (UniqueName: \"kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733064 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733110 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733129 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733165 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jm68f\" (UniqueName: \"kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733185 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733241 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733265 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733289 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733347 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.733415 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.734399 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.747152 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" 
(UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.749341 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.751947 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.752675 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j96lf\" (UniqueName: \"kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.753060 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data\") pod \"cinder-scheduler-0\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834566 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834620 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834653 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jm68f\" (UniqueName: \"kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834790 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834865 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc 
kubenswrapper[5102]: I0123 07:16:05.834912 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.834983 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wqww2\" (UniqueName: \"kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835045 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835093 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835218 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835247 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835308 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835395 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.835828 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.836178 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.836386 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.837057 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.838966 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.857982 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jm68f\" (UniqueName: \"kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f\") pod \"dnsmasq-dns-75dbb546bf-qshcb\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.863127 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.936912 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.936978 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.937036 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.937093 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wqww2\" (UniqueName: \"kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.937141 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.937195 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.937222 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.938701 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.939625 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.941035 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " 
pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.945484 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.963564 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.964071 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.990604 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:05 crc kubenswrapper[5102]: I0123 07:16:05.992767 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wqww2\" (UniqueName: \"kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2\") pod \"cinder-api-0\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " pod="openstack/cinder-api-0" Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.047210 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049336 5102 generic.go:334] "Generic (PLEG): container finished" podID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerID="e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5" exitCode=0 Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049370 5102 generic.go:334] "Generic (PLEG): container finished" podID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerID="127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef" exitCode=2 Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049377 5102 generic.go:334] "Generic (PLEG): container finished" podID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerID="2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a" exitCode=0 Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerDied","Data":"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5"} Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049441 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerDied","Data":"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef"} Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.049451 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerDied","Data":"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a"} Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.372099 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" 
pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:06 crc kubenswrapper[5102]: W0123 07:16:06.379434 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c WatchSource:0}: Error finding container bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c: Status 404 returned error can't find the container with id bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.570047 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:16:06 crc kubenswrapper[5102]: W0123 07:16:06.580051 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0d7aee5f_d1ec_4c48_8449_cde7c0ca6a9c.slice/crio-eb6deda77f252cbc1327e486eff69ca88e6f6d0d661c8ca1516fce530257e793 WatchSource:0}: Error finding container eb6deda77f252cbc1327e486eff69ca88e6f6d0d661c8ca1516fce530257e793: Status 404 returned error can't find the container with id eb6deda77f252cbc1327e486eff69ca88e6f6d0d661c8ca1516fce530257e793 Jan 23 07:16:06 crc kubenswrapper[5102]: I0123 07:16:06.584937 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:07 crc kubenswrapper[5102]: I0123 07:16:07.069840 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerStarted","Data":"bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c"} Jan 23 07:16:07 crc kubenswrapper[5102]: I0123 07:16:07.072269 5102 generic.go:334] "Generic (PLEG): container finished" podID="00cc675d-d659-4c3a-ad06-589435804d40" containerID="42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5" exitCode=0 Jan 23 07:16:07 crc kubenswrapper[5102]: I0123 07:16:07.072352 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" event={"ID":"00cc675d-d659-4c3a-ad06-589435804d40","Type":"ContainerDied","Data":"42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5"} Jan 23 07:16:07 crc kubenswrapper[5102]: I0123 07:16:07.072427 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" event={"ID":"00cc675d-d659-4c3a-ad06-589435804d40","Type":"ContainerStarted","Data":"ee9411c3dcea3f6143759a7127717dac321f9d0fb5c9c81c65647225381c2df9"} Jan 23 07:16:07 crc kubenswrapper[5102]: I0123 07:16:07.073681 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerStarted","Data":"eb6deda77f252cbc1327e486eff69ca88e6f6d0d661c8ca1516fce530257e793"} Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.089015 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerStarted","Data":"14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f"} Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.094688 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" event={"ID":"00cc675d-d659-4c3a-ad06-589435804d40","Type":"ContainerStarted","Data":"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b"} Jan 23 07:16:08 
crc kubenswrapper[5102]: I0123 07:16:08.094839 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.097954 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerStarted","Data":"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98"} Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.124233 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" podStartSLOduration=3.12420577 podStartE2EDuration="3.12420577s" podCreationTimestamp="2026-01-23 07:16:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:08.121038621 +0000 UTC m=+1318.941387606" watchObservedRunningTime="2026-01-23 07:16:08.12420577 +0000 UTC m=+1318.944554745" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.572311 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.863912 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947252 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947316 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f79qn\" (UniqueName: \"kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947373 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947454 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947501 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947584 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.947610 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data\") pod \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\" (UID: \"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262\") " Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.948948 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.957904 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.969739 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts" (OuterVolumeSpecName: "scripts") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:08 crc kubenswrapper[5102]: I0123 07:16:08.986695 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn" (OuterVolumeSpecName: "kube-api-access-f79qn") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "kube-api-access-f79qn". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.028660 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "sg-core-conf-yaml". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.042433 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": read tcp 10.217.0.2:35882->10.217.0.163:9311: read: connection reset by peer" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.042435 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.163:9311/healthcheck\": read tcp 10.217.0.2:35884->10.217.0.163:9311: read: connection reset by peer" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.052350 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.052393 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.052408 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f79qn\" (UniqueName: \"kubernetes.io/projected/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-kube-api-access-f79qn\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.052423 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.052436 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.063892 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.096816 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data" (OuterVolumeSpecName: "config-data") pod "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" (UID: "e89c16d7-5d5b-4c9f-8b54-a43d59cf4262"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.115619 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerStarted","Data":"e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd"} Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.132980 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerDied","Data":"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b"} Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.133079 5102 scope.go:117] "RemoveContainer" containerID="e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.133165 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.144181 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.39898878 podStartE2EDuration="4.144148419s" podCreationTimestamp="2026-01-23 07:16:05 +0000 UTC" firstStartedPulling="2026-01-23 07:16:06.383006637 +0000 UTC m=+1317.203355622" lastFinishedPulling="2026-01-23 07:16:07.128166296 +0000 UTC m=+1317.948515261" observedRunningTime="2026-01-23 07:16:09.141592889 +0000 UTC m=+1319.961941854" watchObservedRunningTime="2026-01-23 07:16:09.144148419 +0000 UTC m=+1319.964497394" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.132794 5102 generic.go:334] "Generic (PLEG): container finished" podID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerID="341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b" exitCode=0 Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.147917 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e89c16d7-5d5b-4c9f-8b54-a43d59cf4262","Type":"ContainerDied","Data":"0f50c88b41b6835a4d76349f7c33773a9cfb4233332166701e64fd389d0a1165"} Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.154694 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.154717 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.155060 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerStarted","Data":"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a"} Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.155497 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.186872 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=4.186831697 podStartE2EDuration="4.186831697s" podCreationTimestamp="2026-01-23 07:16:05 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2026-01-23 07:16:09.178200679 +0000 UTC m=+1319.998549654" watchObservedRunningTime="2026-01-23 07:16:09.186831697 +0000 UTC m=+1320.007180682" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.254184 5102 scope.go:117] "RemoveContainer" containerID="127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.259421 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.283430 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.292766 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.293381 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-notification-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293396 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-notification-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.293422 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-central-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293429 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-central-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.293458 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="sg-core" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293480 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="sg-core" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.293491 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="proxy-httpd" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293498 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="proxy-httpd" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293722 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="sg-core" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293743 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="proxy-httpd" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293760 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-notification-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.293779 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" containerName="ceilometer-central-agent" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.298139 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.301882 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.304248 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.319318 5102 scope.go:117] "RemoveContainer" containerID="341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.322694 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.365527 5102 scope.go:117] "RemoveContainer" containerID="2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.407185 5102 scope.go:117] "RemoveContainer" containerID="e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.408227 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5\": container with ID starting with e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5 not found: ID does not exist" containerID="e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.408294 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5"} err="failed to get container status \"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5\": rpc error: code = NotFound desc = could not find container \"e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5\": container with ID starting with e98ddbd3a082163f6ecb41350a623e0d2fbe7dae04c7bcc0d1666e774805ede5 not found: ID does not exist" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.408329 5102 scope.go:117] "RemoveContainer" containerID="127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.409028 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef\": container with ID starting with 127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef not found: ID does not exist" containerID="127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.409061 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef"} err="failed to get container status \"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef\": rpc error: code = NotFound desc = could not find container \"127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef\": container with ID starting with 127f0b2385565235aec8f2728248082d0af9895dfe5bf2e9dfe8b6f004b81bef not found: ID does not exist" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.409079 5102 scope.go:117] "RemoveContainer" containerID="341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b" Jan 23 
07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.409396 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b\": container with ID starting with 341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b not found: ID does not exist" containerID="341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.409426 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b"} err="failed to get container status \"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b\": rpc error: code = NotFound desc = could not find container \"341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b\": container with ID starting with 341e0eab8245cfd0328f3a26899296137eb04efdc062978fadc5b832c77bc69b not found: ID does not exist" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.409450 5102 scope.go:117] "RemoveContainer" containerID="2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a" Jan 23 07:16:09 crc kubenswrapper[5102]: E0123 07:16:09.409672 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a\": container with ID starting with 2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a not found: ID does not exist" containerID="2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.409704 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a"} err="failed to get container status \"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a\": rpc error: code = NotFound desc = could not find container \"2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a\": container with ID starting with 2332f3569054c8b497d3f5213e8f44236fd71929f9d49413f93e27f47d73ad9a not found: ID does not exist" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.460866 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.460927 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.460948 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.460978 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.461207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zc4qr\" (UniqueName: \"kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.461346 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.461401 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.563588 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zc4qr\" (UniqueName: \"kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564042 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564070 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564175 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564215 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564242 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564276 5102 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.563727 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.564938 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.565159 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.569097 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.570471 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.578148 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.583523 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.584124 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.584638 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.589561 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zc4qr\" (UniqueName: \"kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr\") pod \"ceilometer-0\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.635473 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.662853 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e89c16d7-5d5b-4c9f-8b54-a43d59cf4262" path="/var/lib/kubelet/pods/e89c16d7-5d5b-4c9f-8b54-a43d59cf4262/volumes" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.665418 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs\") pod \"253392be-f480-4837-b1c2-4d92fe442c09\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.665493 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") pod \"253392be-f480-4837-b1c2-4d92fe442c09\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.668550 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data\") pod \"253392be-f480-4837-b1c2-4d92fe442c09\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.668652 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle\") pod \"253392be-f480-4837-b1c2-4d92fe442c09\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.668740 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kf5km\" (UniqueName: \"kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km\") pod \"253392be-f480-4837-b1c2-4d92fe442c09\" (UID: \"253392be-f480-4837-b1c2-4d92fe442c09\") " Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.669033 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs" (OuterVolumeSpecName: "logs") pod "253392be-f480-4837-b1c2-4d92fe442c09" (UID: "253392be-f480-4837-b1c2-4d92fe442c09"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.670823 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/253392be-f480-4837-b1c2-4d92fe442c09-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.674492 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km" (OuterVolumeSpecName: "kube-api-access-kf5km") pod "253392be-f480-4837-b1c2-4d92fe442c09" (UID: "253392be-f480-4837-b1c2-4d92fe442c09"). InnerVolumeSpecName "kube-api-access-kf5km". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.676901 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "253392be-f480-4837-b1c2-4d92fe442c09" (UID: "253392be-f480-4837-b1c2-4d92fe442c09"). 
InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.711687 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "253392be-f480-4837-b1c2-4d92fe442c09" (UID: "253392be-f480-4837-b1c2-4d92fe442c09"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.733764 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data" (OuterVolumeSpecName: "config-data") pod "253392be-f480-4837-b1c2-4d92fe442c09" (UID: "253392be-f480-4837-b1c2-4d92fe442c09"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.774506 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.774911 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.774925 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kf5km\" (UniqueName: \"kubernetes.io/projected/253392be-f480-4837-b1c2-4d92fe442c09-kube-api-access-kf5km\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:09 crc kubenswrapper[5102]: I0123 07:16:09.774940 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/253392be-f480-4837-b1c2-4d92fe442c09-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.124413 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.165069 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerStarted","Data":"8949188710d2acefbeeaf2dac877579f491960779f60c555b134c69d2690e9da"} Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.168371 5102 generic.go:334] "Generic (PLEG): container finished" podID="253392be-f480-4837-b1c2-4d92fe442c09" containerID="655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1" exitCode=0 Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.168893 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.169806 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerDied","Data":"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1"} Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.169851 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-5f4bb86bfb-vm5xl" event={"ID":"253392be-f480-4837-b1c2-4d92fe442c09","Type":"ContainerDied","Data":"3162d1960db8510e71507894e2b487bff1c1f991fe81eaad085ec3425af1df9c"} Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.169880 5102 scope.go:117] "RemoveContainer" containerID="655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.170120 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api-log" containerID="cri-o://8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" gracePeriod=30 Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.170219 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api" containerID="cri-o://c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" gracePeriod=30 Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.225409 5102 scope.go:117] "RemoveContainer" containerID="522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.259994 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.262396 5102 scope.go:117] "RemoveContainer" containerID="655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1" Jan 23 07:16:10 crc kubenswrapper[5102]: E0123 07:16:10.266939 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1\": container with ID starting with 655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1 not found: ID does not exist" containerID="655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.267035 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1"} err="failed to get container status \"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1\": rpc error: code = NotFound desc = could not find container \"655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1\": container with ID starting with 655b2682c27613f3da0b934e280b0ea49c75403f229100d32c6a823d1abd34c1 not found: ID does not exist" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.267099 5102 scope.go:117] "RemoveContainer" containerID="522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20" Jan 23 07:16:10 crc kubenswrapper[5102]: E0123 07:16:10.267838 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container 
\"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20\": container with ID starting with 522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20 not found: ID does not exist" containerID="522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.268383 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20"} err="failed to get container status \"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20\": rpc error: code = NotFound desc = could not find container \"522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20\": container with ID starting with 522ca843f3ef5905b06098ff6ef973bd8e01c4fb1bdcdfbf6f68c1df704c7d20 not found: ID does not exist" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.275527 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-5f4bb86bfb-vm5xl"] Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.777231 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.863677 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910094 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910166 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910230 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wqww2\" (UniqueName: \"kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910268 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910293 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.910319 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 
07:16:10.910372 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data\") pod \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\" (UID: \"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c\") " Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.912155 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.912447 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs" (OuterVolumeSpecName: "logs") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.925890 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2" (OuterVolumeSpecName: "kube-api-access-wqww2") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "kube-api-access-wqww2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.926198 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.931489 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts" (OuterVolumeSpecName: "scripts") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.953700 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:10 crc kubenswrapper[5102]: I0123 07:16:10.972309 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data" (OuterVolumeSpecName: "config-data") pod "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" (UID: "0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.012679 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.012943 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.013004 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wqww2\" (UniqueName: \"kubernetes.io/projected/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-kube-api-access-wqww2\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.013066 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.013142 5102 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.013201 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.013267 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.179892 5102 generic.go:334] "Generic (PLEG): container finished" podID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerID="c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" exitCode=0 Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.181257 5102 generic.go:334] "Generic (PLEG): container finished" podID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerID="8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" exitCode=143 Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.180030 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerDied","Data":"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a"} Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.181594 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerDied","Data":"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98"} Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.181684 5102 scope.go:117] "RemoveContainer" containerID="c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.179976 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.181703 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c","Type":"ContainerDied","Data":"eb6deda77f252cbc1327e486eff69ca88e6f6d0d661c8ca1516fce530257e793"} Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.183870 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerStarted","Data":"7a02c4db745986b10d877c3eb63b6fd963ff2fa984f62d25a3227b1e3f516c49"} Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.228056 5102 scope.go:117] "RemoveContainer" containerID="8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.240713 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.249212 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283042 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.283418 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283438 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.283460 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283469 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.283490 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283496 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.283508 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283513 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283689 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283702 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api-log" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283714 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="253392be-f480-4837-b1c2-4d92fe442c09" containerName="barbican-api" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.283744 5102 memory_manager.go:354] 
"RemoveStaleState removing state" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" containerName="cinder-api" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.284669 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.287855 5102 scope.go:117] "RemoveContainer" containerID="c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.288209 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-internal-svc" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.288562 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-cinder-public-svc" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.291330 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-api-config-data" Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.299400 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a\": container with ID starting with c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a not found: ID does not exist" containerID="c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.299454 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a"} err="failed to get container status \"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a\": rpc error: code = NotFound desc = could not find container \"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a\": container with ID starting with c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a not found: ID does not exist" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.299485 5102 scope.go:117] "RemoveContainer" containerID="8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" Jan 23 07:16:11 crc kubenswrapper[5102]: E0123 07:16:11.304788 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98\": container with ID starting with 8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98 not found: ID does not exist" containerID="8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.304829 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98"} err="failed to get container status \"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98\": rpc error: code = NotFound desc = could not find container \"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98\": container with ID starting with 8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98 not found: ID does not exist" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.304849 5102 scope.go:117] "RemoveContainer" containerID="c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.308268 5102 pod_container_deletor.go:53] "DeleteContainer returned 
error" containerID={"Type":"cri-o","ID":"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a"} err="failed to get container status \"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a\": rpc error: code = NotFound desc = could not find container \"c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a\": container with ID starting with c462cdef219a3a586bf637662c9e26e0edc78007989c85785935b599c479509a not found: ID does not exist" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.308322 5102 scope.go:117] "RemoveContainer" containerID="8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.309329 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98"} err="failed to get container status \"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98\": rpc error: code = NotFound desc = could not find container \"8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98\": container with ID starting with 8db2f94c0209332ba1723868d0b9cd32ae8f36904514eff9d90c4700c5ad0e98 not found: ID does not exist" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.320473 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.424701 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425291 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425351 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425407 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-h8fbj\" (UniqueName: \"kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425430 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425492 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id\") pod 
\"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425516 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425645 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.425669 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528275 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528329 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528383 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528486 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528579 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528651 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-h8fbj\" (UniqueName: \"kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528679 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528780 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.528818 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.529230 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.529464 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.534100 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.535391 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.535637 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.535974 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.538899 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.546222 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle\") pod \"cinder-api-0\" 
(UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.553223 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-h8fbj\" (UniqueName: \"kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj\") pod \"cinder-api-0\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.612996 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.636915 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c" path="/var/lib/kubelet/pods/0d7aee5f-d1ec-4c48-8449-cde7c0ca6a9c/volumes" Jan 23 07:16:11 crc kubenswrapper[5102]: I0123 07:16:11.638158 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="253392be-f480-4837-b1c2-4d92fe442c09" path="/var/lib/kubelet/pods/253392be-f480-4837-b1c2-4d92fe442c09/volumes" Jan 23 07:16:12 crc kubenswrapper[5102]: I0123 07:16:12.144148 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:16:12 crc kubenswrapper[5102]: I0123 07:16:12.196861 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerStarted","Data":"35ad746a4ac3dd7283b65af8937ae4c8fe6e3a642b36302c9198f96bfaae34cb"} Jan 23 07:16:12 crc kubenswrapper[5102]: I0123 07:16:12.213302 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerStarted","Data":"f0f61f561ed55e070cb9198ec7807aeb68701fbf61e7d471ee0c376f0af27da1"} Jan 23 07:16:13 crc kubenswrapper[5102]: I0123 07:16:13.226376 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerStarted","Data":"38ba8da046ac14dc360b77fb7112dee42133d1a68989da117321421af10dcea2"} Jan 23 07:16:13 crc kubenswrapper[5102]: I0123 07:16:13.230472 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerStarted","Data":"3b04b65b8cd8edb865c46578b256807c11c47255c047eb9113a66bdc7991ada1"} Jan 23 07:16:13 crc kubenswrapper[5102]: I0123 07:16:13.670448 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-7f649ddc48-2nj2r" Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.023087 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.023632 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-86d84bb977-w99l4" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-api" containerID="cri-o://424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387" gracePeriod=30 Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.024145 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-86d84bb977-w99l4" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" containerID="cri-o://11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e" gracePeriod=30 Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 
07:16:14.054823 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-795454f649-697pp"]
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.056229 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.094688 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-795454f649-697pp"]
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.137277 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-86d84bb977-w99l4" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.155:9696/\": read tcp 10.217.0.2:36678->10.217.0.155:9696: read: connection reset by peer"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.205153 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.205242 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.205495 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lzkdq\" (UniqueName: \"kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.205807 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.205933 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.206164 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.206212 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.246010 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerStarted","Data":"3ddba53ff6c46c602ed4684962f0cba881c97ccaa05dc9a08f07072573bc3515"}
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.246214 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.248047 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerStarted","Data":"a4b253153a5b0ae4b7304fc69166a78bdc78f9b33184fefd123a47d6a29e02a7"}
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.248853 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/cinder-api-0"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.272228 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.593343075 podStartE2EDuration="5.272202494s" podCreationTimestamp="2026-01-23 07:16:09 +0000 UTC" firstStartedPulling="2026-01-23 07:16:10.14361636 +0000 UTC m=+1320.963965325" lastFinishedPulling="2026-01-23 07:16:13.822475769 +0000 UTC m=+1324.642824744" observedRunningTime="2026-01-23 07:16:14.268746877 +0000 UTC m=+1325.089095872" watchObservedRunningTime="2026-01-23 07:16:14.272202494 +0000 UTC m=+1325.092551469"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.297708 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-api-0" podStartSLOduration=3.297677356 podStartE2EDuration="3.297677356s" podCreationTimestamp="2026-01-23 07:16:11 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:14.290870465 +0000 UTC m=+1325.111219450" watchObservedRunningTime="2026-01-23 07:16:14.297677356 +0000 UTC m=+1325.118026331"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.307993 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308069 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308103 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lzkdq\" (UniqueName: \"kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308140 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308165 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308213 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.308238 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.315054 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.315366 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.315722 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.316701 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.317762 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.320029 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.330188 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lzkdq\" (UniqueName: \"kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq\") pod \"neutron-795454f649-697pp\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " pod="openstack/neutron-795454f649-697pp" Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.386600 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-795454f649-697pp" Jan 23 07:16:14 crc kubenswrapper[5102]: I0123 07:16:14.965795 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-795454f649-697pp"] Jan 23 07:16:15 crc kubenswrapper[5102]: I0123 07:16:15.264316 5102 generic.go:334] "Generic (PLEG): container finished" podID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerID="11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e" exitCode=0 Jan 23 07:16:15 crc kubenswrapper[5102]: I0123 07:16:15.264415 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerDied","Data":"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e"} Jan 23 07:16:15 crc kubenswrapper[5102]: I0123 07:16:15.271804 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerStarted","Data":"c7b283157687d9e0d560f59da8badfd97039798e26a22723de1962d1f09fe7f1"} Jan 23 07:16:15 crc kubenswrapper[5102]: I0123 07:16:15.965716 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.049768 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"] Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.050389 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="dnsmasq-dns" containerID="cri-o://374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b" gracePeriod=10 Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.304664 5102 generic.go:334] "Generic (PLEG): container finished" podID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerID="374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b" exitCode=0 Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.304767 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" event={"ID":"1c19dfac-bb3d-49f4-9296-5785c0e30ef7","Type":"ContainerDied","Data":"374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b"} Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.310245 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerStarted","Data":"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31"} Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.310323 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerStarted","Data":"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba"} Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 
07:16:16.310512 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/neutron-795454f649-697pp" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.337370 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/neutron-795454f649-697pp" podStartSLOduration=2.337353538 podStartE2EDuration="2.337353538s" podCreationTimestamp="2026-01-23 07:16:14 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:16.335554311 +0000 UTC m=+1327.155903286" watchObservedRunningTime="2026-01-23 07:16:16.337353538 +0000 UTC m=+1327.157702513" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.356949 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.450261 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.797335 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.907297 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wvgq9\" (UniqueName: \"kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.908702 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.908784 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.908839 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.909037 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.909090 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc\") pod \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\" (UID: \"1c19dfac-bb3d-49f4-9296-5785c0e30ef7\") " Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.941782 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9" 
(OuterVolumeSpecName: "kube-api-access-wvgq9") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "kube-api-access-wvgq9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:16 crc kubenswrapper[5102]: I0123 07:16:16.987469 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:16.998975 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.009144 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config" (OuterVolumeSpecName: "config") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.012598 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.012623 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wvgq9\" (UniqueName: \"kubernetes.io/projected/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-kube-api-access-wvgq9\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.012634 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.012645 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.028829 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.048449 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "1c19dfac-bb3d-49f4-9296-5785c0e30ef7" (UID: "1c19dfac-bb3d-49f4-9296-5785c0e30ef7"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.114332 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.114589 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/1c19dfac-bb3d-49f4-9296-5785c0e30ef7-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.219438 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-86d84bb977-w99l4" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.155:9696/\": dial tcp 10.217.0.155:9696: connect: connection refused" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.321254 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="cinder-scheduler" containerID="cri-o://14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f" gracePeriod=30 Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.321566 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.327624 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-66cdd4b5b5-tpx4p" event={"ID":"1c19dfac-bb3d-49f4-9296-5785c0e30ef7","Type":"ContainerDied","Data":"065fb01ed0695b220e02202d91a02ad5783736d08c889d9a1d2d61bcaa239229"} Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.327671 5102 scope.go:117] "RemoveContainer" containerID="374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.327865 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="probe" containerID="cri-o://e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd" gracePeriod=30 Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.359470 5102 scope.go:117] "RemoveContainer" containerID="418ec834bc43dcd3b6f8f4ac43e21ff0ae20f8aa8627023ec70b7e6a7eafe633" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.360786 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"] Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.375118 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-66cdd4b5b5-tpx4p"] Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.608875 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" path="/var/lib/kubelet/pods/1c19dfac-bb3d-49f4-9296-5785c0e30ef7/volumes" Jan 23 07:16:17 crc kubenswrapper[5102]: I0123 07:16:17.884545 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:16:18 crc kubenswrapper[5102]: I0123 07:16:18.048259 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:16:18 crc kubenswrapper[5102]: I0123 07:16:18.336416 5102 generic.go:334] "Generic (PLEG): 
container finished" podID="0911dabf-0740-44ff-96ad-db85e3514a73" containerID="e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd" exitCode=0 Jan 23 07:16:18 crc kubenswrapper[5102]: I0123 07:16:18.336485 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerDied","Data":"e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd"} Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.357194 5102 generic.go:334] "Generic (PLEG): container finished" podID="0911dabf-0740-44ff-96ad-db85e3514a73" containerID="14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f" exitCode=0 Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.357233 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerDied","Data":"14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f"} Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.477251 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.737128 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867121 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867179 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867345 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867414 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867452 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.867477 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j96lf\" (UniqueName: \"kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf\") pod \"0911dabf-0740-44ff-96ad-db85e3514a73\" (UID: \"0911dabf-0740-44ff-96ad-db85e3514a73\") " Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 
07:16:19.868783 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.872730 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/openstackclient"] Jan 23 07:16:19 crc kubenswrapper[5102]: E0123 07:16:19.873230 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="cinder-scheduler" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.873251 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="cinder-scheduler" Jan 23 07:16:19 crc kubenswrapper[5102]: E0123 07:16:19.874454 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="init" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874469 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="init" Jan 23 07:16:19 crc kubenswrapper[5102]: E0123 07:16:19.874480 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="probe" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874487 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="probe" Jan 23 07:16:19 crc kubenswrapper[5102]: E0123 07:16:19.874502 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="dnsmasq-dns" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874508 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="dnsmasq-dns" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874726 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c19dfac-bb3d-49f4-9296-5785c0e30ef7" containerName="dnsmasq-dns" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874737 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="cinder-scheduler" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.874753 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" containerName="probe" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.890762 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/openstackclient" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.892797 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"] Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.894685 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstackclient-openstackclient-dockercfg-btpz6" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.896682 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openstack"/"openstack-config" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.898918 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-config-secret" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.909185 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.909369 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts" (OuterVolumeSpecName: "scripts") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.918739 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf" (OuterVolumeSpecName: "kube-api-access-j96lf") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "kube-api-access-j96lf". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.975904 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976092 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976368 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976450 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrfwk\" (UniqueName: \"kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976590 5102 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/0911dabf-0740-44ff-96ad-db85e3514a73-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976603 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976612 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.976639 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j96lf\" (UniqueName: \"kubernetes.io/projected/0911dabf-0740-44ff-96ad-db85e3514a73-kube-api-access-j96lf\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.978346 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:16:19 crc kubenswrapper[5102]: I0123 07:16:19.994903 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.015468 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data" (OuterVolumeSpecName: "config-data") pod "0911dabf-0740-44ff-96ad-db85e3514a73" (UID: "0911dabf-0740-44ff-96ad-db85e3514a73"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077516 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077802 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrfwk\" (UniqueName: \"kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077849 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077894 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077962 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.077972 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/0911dabf-0740-44ff-96ad-db85e3514a73-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.079060 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.081432 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.081956 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret\") pod \"openstackclient\" (UID: 
\"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.095415 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zrfwk\" (UniqueName: \"kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk\") pod \"openstackclient\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") " pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.179463 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.179609 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.179644 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.179682 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.179732 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nxxqm\" (UniqueName: \"kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.180461 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.180591 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs\") pod \"b3cd0058-0aaf-4628-8451-91a9b48925a1\" (UID: \"b3cd0058-0aaf-4628-8451-91a9b48925a1\") " Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.184568 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.184680 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm" (OuterVolumeSpecName: "kube-api-access-nxxqm") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "kube-api-access-nxxqm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.239318 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config" (OuterVolumeSpecName: "config") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.244701 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.252231 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.269782 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.279073 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283892 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283928 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283939 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283950 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283962 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.283976 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nxxqm\" (UniqueName: \"kubernetes.io/projected/b3cd0058-0aaf-4628-8451-91a9b48925a1-kube-api-access-nxxqm\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.310944 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "b3cd0058-0aaf-4628-8451-91a9b48925a1" (UID: "b3cd0058-0aaf-4628-8451-91a9b48925a1"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.391040 5102 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/b3cd0058-0aaf-4628-8451-91a9b48925a1-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.391941 5102 generic.go:334] "Generic (PLEG): container finished" podID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerID="424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387" exitCode=0 Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.392039 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerDied","Data":"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387"} Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.392072 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-86d84bb977-w99l4" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.392101 5102 scope.go:117] "RemoveContainer" containerID="11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.392086 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-86d84bb977-w99l4" event={"ID":"b3cd0058-0aaf-4628-8451-91a9b48925a1","Type":"ContainerDied","Data":"ceba6e70548e27344439cf65f6e7c6810a9d1fb46e75b64e482ab07843570224"} Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.399484 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"0911dabf-0740-44ff-96ad-db85e3514a73","Type":"ContainerDied","Data":"bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c"} Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.399577 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.446500 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.447379 5102 scope.go:117] "RemoveContainer" containerID="424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.456799 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-86d84bb977-w99l4"] Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.464696 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.473234 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.485923 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:20 crc kubenswrapper[5102]: E0123 07:16:20.486516 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-api" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.486631 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-api" Jan 23 07:16:20 crc kubenswrapper[5102]: E0123 07:16:20.486734 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.486798 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.487017 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-api" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.487098 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" containerName="neutron-httpd" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.488154 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493186 5102 scope.go:117] "RemoveContainer" containerID="11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493311 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2nc6p\" (UniqueName: \"kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493371 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493434 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493460 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493495 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.493555 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: E0123 07:16:20.494903 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e\": container with ID starting with 11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e not found: ID does not exist" containerID="11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.494954 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e"} err="failed to get container status \"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e\": rpc error: code = NotFound desc = could not find container \"11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e\": container with ID starting with 
11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e not found: ID does not exist" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.494983 5102 scope.go:117] "RemoveContainer" containerID="424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.494927 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-scheduler-config-data" Jan 23 07:16:20 crc kubenswrapper[5102]: E0123 07:16:20.496255 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387\": container with ID starting with 424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387 not found: ID does not exist" containerID="424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.496289 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387"} err="failed to get container status \"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387\": rpc error: code = NotFound desc = could not find container \"424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387\": container with ID starting with 424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387 not found: ID does not exist" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.496310 5102 scope.go:117] "RemoveContainer" containerID="e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.499134 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.539355 5102 scope.go:117] "RemoveContainer" containerID="14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.595821 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2nc6p\" (UniqueName: \"kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.595892 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.595998 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.596020 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0" Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 
07:16:20.596056 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.596129 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.596231 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.600973 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.600975 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.605844 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.606744 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.612158 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2nc6p\" (UniqueName: \"kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p\") pod \"cinder-scheduler-0\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " pod="openstack/cinder-scheduler-0"
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.711433 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/openstackclient"]
Jan 23 07:16:20 crc kubenswrapper[5102]: I0123 07:16:20.813353 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0"
Jan 23 07:16:21 crc kubenswrapper[5102]: I0123 07:16:21.296503 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-scheduler-0"]
Jan 23 07:16:21 crc kubenswrapper[5102]: I0123 07:16:21.415104 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6d355347-569d-4082-b9fd-66d286ef59be","Type":"ContainerStarted","Data":"661357e88412cf458d1f35437984ce0e0d39dd69b29aa25dc357a77916bb8b59"}
Jan 23 07:16:21 crc kubenswrapper[5102]: I0123 07:16:21.418004 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerStarted","Data":"497109b2c03c3b0d9b1d174cd2e20c0402ffd455df531f23e9b86427e785e888"}
Jan 23 07:16:21 crc kubenswrapper[5102]: I0123 07:16:21.611229 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0911dabf-0740-44ff-96ad-db85e3514a73" path="/var/lib/kubelet/pods/0911dabf-0740-44ff-96ad-db85e3514a73/volumes"
Jan 23 07:16:21 crc kubenswrapper[5102]: I0123 07:16:21.612192 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3cd0058-0aaf-4628-8451-91a9b48925a1" path="/var/lib/kubelet/pods/b3cd0058-0aaf-4628-8451-91a9b48925a1/volumes"
Jan 23 07:16:22 crc kubenswrapper[5102]: I0123 07:16:22.464835 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerStarted","Data":"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb"}
Jan 23 07:16:23 crc kubenswrapper[5102]: E0123 07:16:23.029093 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-bf9fad402cbc96186801a138ac7dc697efdb859f7c42bed295eece852d48280c\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-conmon-e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c19dfac_bb3d_49f4_9296_5785c0e30ef7.slice/crio-065fb01ed0695b220e02202d91a02ad5783736d08c889d9a1d2d61bcaa239229\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3d5faa20_dfb4_4dc5_b4e0_21d0fef34cc1.slice/crio-13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-conmon-14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c19dfac_bb3d_49f4_9296_5785c0e30ef7.slice\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3cd0058_0aaf_4628_8451_91a9b48925a1.slice/crio-ceba6e70548e27344439cf65f6e7c6810a9d1fb46e75b64e482ab07843570224\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3cd0058_0aaf_4628_8451_91a9b48925a1.slice/crio-conmon-424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-e9b438f1d5c455cfad45873923ead3b76e54f7e9650284708b9e5741ed1916fd.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod0911dabf_0740_44ff_96ad_db85e3514a73.slice/crio-14009590c3814bccb1391feb28993865b5dc0b1fe2def076b2d421d7ae15681f.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c19dfac_bb3d_49f4_9296_5785c0e30ef7.slice/crio-conmon-374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3cd0058_0aaf_4628_8451_91a9b48925a1.slice/crio-conmon-11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3cd0058_0aaf_4628_8451_91a9b48925a1.slice/crio-11f424e53dec7431aa5baee823b7dd5d77230f998477f01e71ab5273983e644e.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod1c19dfac_bb3d_49f4_9296_5785c0e30ef7.slice/crio-374e26f309c1a6a573b1c2bf276c756c491501f820c1e4306fd56a9acfafff0b.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podb3cd0058_0aaf_4628_8451_91a9b48925a1.slice/crio-424f0314d3ee6940b9614800e8bbed76736dc80e7da883128386de6f691aa387.scope\": RecentStats: unable to find data in memory cache]"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.218965 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-679f9d9c44-mmp9r"
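[Annotation, not part of the captured log] The "Partial failure issuing cadvisor.ContainerInfoV2" entry above lists systemd cgroup slices for containers that were torn down moments earlier (the same pod UIDs whose orphaned volume dirs were just cleaned up), so the missing-stats failures are transient. The slice names embed the pod UID with dashes replaced by underscores. A minimal Go sketch of that naming, inferred from the paths in the log rather than taken from kubelet source:

    package main

    import (
        "fmt"
        "strings"
    )

    // besteffortPodSlice rebuilds the cgroup slice path seen in the cadvisor
    // errors above from a pod UID. Illustrative only; the mapping (dashes to
    // underscores inside a kubepods-besteffort-pod<uid>.slice) is read off the
    // logged paths, not copied from kubelet's own code.
    func besteffortPodSlice(podUID string) string {
        return "/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod" +
            strings.ReplaceAll(podUID, "-", "_") + ".slice"
    }

    func main() {
        // Pod UID taken from the "Cleaned up orphaned pod volumes dir" entries above.
        fmt.Println(besteffortPodSlice("0911dabf-0740-44ff-96ad-db85e3514a73"))
    }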
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356232 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7qg8\" (UniqueName: \"kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8\") pod \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") "
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356315 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs\") pod \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") "
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356383 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data\") pod \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") "
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356420 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle\") pod \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") "
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356593 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom\") pod \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\" (UID: \"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1\") "
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.356887 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs" (OuterVolumeSpecName: "logs") pod "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" (UID: "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.370706 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8" (OuterVolumeSpecName: "kube-api-access-v7qg8") pod "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" (UID: "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1"). InnerVolumeSpecName "kube-api-access-v7qg8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.375845 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" (UID: "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.414754 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data" (OuterVolumeSpecName: "config-data") pod "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" (UID: "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.437736 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" (UID: "3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.458963 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data-custom\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.459000 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7qg8\" (UniqueName: \"kubernetes.io/projected/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-kube-api-access-v7qg8\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.459014 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.459022 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.459030 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.489850 5102 generic.go:334] "Generic (PLEG): container finished" podID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerID="13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f" exitCode=137
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.489902 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerDied","Data":"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"}
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.489967 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-679f9d9c44-mmp9r"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.489971 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-679f9d9c44-mmp9r" event={"ID":"3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1","Type":"ContainerDied","Data":"2bbe42b18e5260d1dd5c274d62434ecba80f4a25db0dfd21e62f8aab0f911e30"}
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.489990 5102 scope.go:117] "RemoveContainer" containerID="13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.501529 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerStarted","Data":"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44"}
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.526838 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/cinder-scheduler-0" podStartSLOduration=3.526818189 podStartE2EDuration="3.526818189s" podCreationTimestamp="2026-01-23 07:16:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:23.519213542 +0000 UTC m=+1334.339562527" watchObservedRunningTime="2026-01-23 07:16:23.526818189 +0000 UTC m=+1334.347167164"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.527734 5102 scope.go:117] "RemoveContainer" containerID="fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.540684 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"]
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.547059 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-679f9d9c44-mmp9r"]
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.562489 5102 scope.go:117] "RemoveContainer" containerID="13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"
Jan 23 07:16:23 crc kubenswrapper[5102]: E0123 07:16:23.565154 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f\": container with ID starting with 13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f not found: ID does not exist" containerID="13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.565191 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"} err="failed to get container status \"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f\": rpc error: code = NotFound desc = could not find container \"13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f\": container with ID starting with 13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f not found: ID does not exist"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.565215 5102 scope.go:117] "RemoveContainer" containerID="fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"
Jan 23 07:16:23 crc kubenswrapper[5102]: E0123 07:16:23.565570 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2\": container with ID starting with fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2 not found: ID does not exist" containerID="fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.565634 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2"} err="failed to get container status \"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2\": rpc error: code = NotFound desc = could not find container \"fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2\": container with ID starting with fc82d97c322e5943d7dd830a9fd8d874497bc3154e3fed895cff3090617632f2 not found: ID does not exist"
Jan 23 07:16:23 crc kubenswrapper[5102]: I0123 07:16:23.612384 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" path="/var/lib/kubelet/pods/3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1/volumes"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.129096 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/cinder-api-0"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.873573 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-db-create-2dzhq"]
Jan 23 07:16:24 crc kubenswrapper[5102]: E0123 07:16:24.874240 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api-log"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.874255 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api-log"
Jan 23 07:16:24 crc kubenswrapper[5102]: E0123 07:16:24.874269 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.874276 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.874460 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.874485 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api-log"
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.875056 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dzhq"
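[Annotation, not part of the captured log] The paired "ContainerStatus from runtime service failed" / "DeleteContainer returned error" entries above are the usual benign race: the container had already been removed by the time the kubelet re-queried its status, so the runtime answers gRPC NotFound. A minimal Go sketch of treating that code as "already gone" (removeContainer is a hypothetical stand-in, not the real CRI client; requires the google.golang.org/grpc module):

    package main

    import (
        "fmt"

        "google.golang.org/grpc/codes"
        "google.golang.org/grpc/status"
    )

    // removeContainer stands in for a runtime call that can fail with gRPC
    // NotFound, as in the log entries above.
    func removeContainer(id string) error {
        return status.Error(codes.NotFound, "could not find container "+id)
    }

    // deleteIgnoringNotFound treats NotFound as success, which is the harmless
    // outcome these log entries are reporting.
    func deleteIgnoringNotFound(id string) error {
        if err := removeContainer(id); err != nil && status.Code(err) != codes.NotFound {
            return err
        }
        return nil
    }

    func main() {
        // Container ID taken from the RemoveContainer entries above.
        fmt.Println(deleteIgnoringNotFound("13be077641659410db803fd1059e59015022b8da9c35610bc7e598a61f21bf9f"))
    }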
Jan 23 07:16:24 crc kubenswrapper[5102]: I0123 07:16:24.902229 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2dzhq"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.005033 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.005233 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lnnh2\" (UniqueName: \"kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.068897 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-db-create-89jsr"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.070123 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.087607 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-89jsr"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.104146 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-8d62-account-create-update-7r467"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.105364 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.106959 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lnnh2\" (UniqueName: \"kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.107045 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.107749 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.113177 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.132965 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lnnh2\" (UniqueName: \"kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2\") pod \"nova-api-db-create-2dzhq\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.150257 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8d62-account-create-update-7r467"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.190640 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dzhq"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.208830 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.208977 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f46dg\" (UniqueName: \"kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.209018 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.209038 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-658pg\" (UniqueName: \"kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.275589 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-db-create-ppr5f"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.276975 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.289373 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-wz8lf"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.290668 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.293857 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.306146 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ppr5f"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.310294 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f46dg\" (UniqueName: \"kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.310353 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.310375 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-658pg\" (UniqueName: \"kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.310406 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.311169 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.311894 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.317607 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-wz8lf"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.337116 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f46dg\" (UniqueName: \"kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg\") pod \"nova-cell0-db-create-89jsr\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.357380 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-658pg\" (UniqueName: \"kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg\") pod \"nova-api-8d62-account-create-update-7r467\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.386783 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-89jsr"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.414670 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.415010 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.415158 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rftbg\" (UniqueName: \"kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.415190 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4cncz\" (UniqueName: \"kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.431031 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-7r467"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.468514 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b157-account-create-update-5dwdj"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.469678 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.474085 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.487468 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-5dwdj"]
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.518886 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.518966 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.519107 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rftbg\" (UniqueName: \"kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.519132 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4cncz\" (UniqueName: \"kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.519831 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.519879 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.545862 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4cncz\" (UniqueName: \"kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz\") pod \"nova-cell0-f3ca-account-create-update-wz8lf\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.553827 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rftbg\" (UniqueName: \"kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg\") pod \"nova-cell1-db-create-ppr5f\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.622737 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.622890 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m7mzt\" (UniqueName: \"kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.633898 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ppr5f"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.727736 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.727996 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m7mzt\" (UniqueName: \"kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.729068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.733072 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.759070 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m7mzt\" (UniqueName: \"kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt\") pod \"nova-cell1-b157-account-create-update-5dwdj\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.802015 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-5dwdj"
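[Annotation, not part of the captured log] Every entry in this log is a klog line whose structured fields are key="value" pairs (pod=..., probe=..., status=...), which makes event extraction mechanical. A rough Go sketch of pulling those fields out, useful for grepping transitions such as the cinder-scheduler-0 startup-probe entries that follow; it is a simplification for illustration, not a complete klog parser:

    package main

    import (
        "fmt"
        "regexp"
    )

    // kv matches the key="value" fields kubelet appends to each message.
    var kv = regexp.MustCompile(`(\w+)="([^"]*)"`)

    // fields collects the structured pairs from one log line.
    func fields(line string) map[string]string {
        out := map[string]string{}
        for _, m := range kv.FindAllStringSubmatch(line, -1) {
            out[m[1]] = m[2]
        }
        return out
    }

    func main() {
        // Example line taken from the probe entry just below.
        line := `I0123 07:16:25.813670 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"`
        f := fields(line)
        fmt.Println(f["pod"], f["probe"], f["status"]) // openstack/cinder-scheduler-0 startup unhealthy
    }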
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.813670 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/cinder-scheduler-0"
Jan 23 07:16:25 crc kubenswrapper[5102]: I0123 07:16:25.821455 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-db-create-2dzhq"]
Jan 23 07:16:26 crc kubenswrapper[5102]: W0123 07:16:26.014387 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podc92486fb_6bab_4681_b42a_11b5631b265f.slice/crio-34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d WatchSource:0}: Error finding container 34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d: Status 404 returned error can't find the container with id 34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.048204 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8d62-account-create-update-7r467"]
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.091332 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-db-create-89jsr"]
Jan 23 07:16:26 crc kubenswrapper[5102]: W0123 07:16:26.123233 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod27b98baa_da03_46ae_8af3_ca99483f0007.slice/crio-3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d WatchSource:0}: Error finding container 3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d: Status 404 returned error can't find the container with id 3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.448832 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-db-create-ppr5f"]
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.540900 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-wz8lf"]
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.557068 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-7r467" event={"ID":"27b98baa-da03-46ae-8af3-ca99483f0007","Type":"ContainerStarted","Data":"773c01238764a0ed41158b255aa3c581891c36cf9b44486fe6ee3bcd08906b5d"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.557132 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-7r467" event={"ID":"27b98baa-da03-46ae-8af3-ca99483f0007","Type":"ContainerStarted","Data":"3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.561734 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ppr5f" event={"ID":"b72030e8-a814-47ec-b0f2-edd8b146ff7a","Type":"ContainerStarted","Data":"53422378819b23265b94a6c95a355f2d7c9cce7be8833e63917ce571a074d5b6"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.564901 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-89jsr" event={"ID":"d2aadd3e-7281-49ae-88b5-611993646185","Type":"ContainerStarted","Data":"80130ba290cd20c0ccdae1ffb0c1dcbd426c8d074791abc7cf555bdeaa367da6"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.564938 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-89jsr" event={"ID":"d2aadd3e-7281-49ae-88b5-611993646185","Type":"ContainerStarted","Data":"d21f7f22477557f8a33c6cb558fe805e601bb750452147fada76777f6826920e"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.568635 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dzhq" event={"ID":"c92486fb-6bab-4681-b42a-11b5631b265f","Type":"ContainerStarted","Data":"c91a8e896c4e37680e0983957da81be367f9c32a63a4da75e84a99d0e3b57364"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.568669 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dzhq" event={"ID":"c92486fb-6bab-4681-b42a-11b5631b265f","Type":"ContainerStarted","Data":"34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d"}
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.580078 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-8d62-account-create-update-7r467" podStartSLOduration=1.58005687 podStartE2EDuration="1.58005687s" podCreationTimestamp="2026-01-23 07:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:26.569992087 +0000 UTC m=+1337.390341062" watchObservedRunningTime="2026-01-23 07:16:26.58005687 +0000 UTC m=+1337.400405845"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.611533 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-db-create-2dzhq" podStartSLOduration=2.611509449 podStartE2EDuration="2.611509449s" podCreationTimestamp="2026-01-23 07:16:24 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:26.589790732 +0000 UTC m=+1337.410139707" watchObservedRunningTime="2026-01-23 07:16:26.611509449 +0000 UTC m=+1337.431858424"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.648605 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-db-create-89jsr" podStartSLOduration=1.648584061 podStartE2EDuration="1.648584061s" podCreationTimestamp="2026-01-23 07:16:25 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:26.60386281 +0000 UTC m=+1337.424211785" watchObservedRunningTime="2026-01-23 07:16:26.648584061 +0000 UTC m=+1337.468933036"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.690188 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-5dwdj"]
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.975372 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"]
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.977743 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.983076 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-public-svc"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.983595 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-swift-internal-svc"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.983887 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"swift-proxy-config-data"
Jan 23 07:16:26 crc kubenswrapper[5102]: I0123 07:16:26.993934 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"]
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.075905 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.075949 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.075974 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.076018 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.076037 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.076124 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.076155 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.076183 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2z5f\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.177765 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.177858 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.177926 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2z5f\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.177963 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.177986 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.178015 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.178084 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.178110 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.178558 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.178712 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.184442 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.186870 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.187568 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.188466 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.197238 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.208329 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2z5f\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f\") pod \"swift-proxy-845d4fc79c-bhsj4\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.305729 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-845d4fc79c-bhsj4"
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.581514 5102 generic.go:334] "Generic (PLEG): container finished" podID="b72030e8-a814-47ec-b0f2-edd8b146ff7a" containerID="37f4fbbf6a9f611f9265e2e7de99da29e4ce4bdea696864bf8cb79bb4d5056d4" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.581613 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ppr5f" event={"ID":"b72030e8-a814-47ec-b0f2-edd8b146ff7a","Type":"ContainerDied","Data":"37f4fbbf6a9f611f9265e2e7de99da29e4ce4bdea696864bf8cb79bb4d5056d4"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.585507 5102 generic.go:334] "Generic (PLEG): container finished" podID="4806efa3-cb85-4d29-956d-63bf181c16be" containerID="4075f0a8cfcd82513e6274dfeabdd2f8728604846eb7a3d2e3942e6e90d51c92" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.585610 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf" event={"ID":"4806efa3-cb85-4d29-956d-63bf181c16be","Type":"ContainerDied","Data":"4075f0a8cfcd82513e6274dfeabdd2f8728604846eb7a3d2e3942e6e90d51c92"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.585679 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf" event={"ID":"4806efa3-cb85-4d29-956d-63bf181c16be","Type":"ContainerStarted","Data":"7b2a619a0418fcdc6852ca47338f17cb8bdb774bd16d394758ceb41b65dce536"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.591821 5102 generic.go:334] "Generic (PLEG): container finished" podID="d2aadd3e-7281-49ae-88b5-611993646185" containerID="80130ba290cd20c0ccdae1ffb0c1dcbd426c8d074791abc7cf555bdeaa367da6" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.591911 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-89jsr" event={"ID":"d2aadd3e-7281-49ae-88b5-611993646185","Type":"ContainerDied","Data":"80130ba290cd20c0ccdae1ffb0c1dcbd426c8d074791abc7cf555bdeaa367da6"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.602395 5102 generic.go:334] "Generic (PLEG): container finished" podID="f9dedf34-d437-4ae1-ada2-46f4ad2b0320" containerID="82f639b8e1751ac8d69a9f889bd3bf8fd351aaa0b6dabbd38eb25e15eb84da0a" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.609239 5102 generic.go:334] "Generic (PLEG): container finished" podID="c92486fb-6bab-4681-b42a-11b5631b265f" containerID="c91a8e896c4e37680e0983957da81be367f9c32a63a4da75e84a99d0e3b57364" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.611454 5102 generic.go:334] "Generic (PLEG): container finished" podID="27b98baa-da03-46ae-8af3-ca99483f0007" containerID="773c01238764a0ed41158b255aa3c581891c36cf9b44486fe6ee3bcd08906b5d" exitCode=0
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.611757 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b157-account-create-update-5dwdj" event={"ID":"f9dedf34-d437-4ae1-ada2-46f4ad2b0320","Type":"ContainerDied","Data":"82f639b8e1751ac8d69a9f889bd3bf8fd351aaa0b6dabbd38eb25e15eb84da0a"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.611785 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b157-account-create-update-5dwdj" event={"ID":"f9dedf34-d437-4ae1-ada2-46f4ad2b0320","Type":"ContainerStarted","Data":"5709729ee7b8bd603fbf7872333b3b1c8805be0e748ed75f503527bbbaeb52c3"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.611796 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dzhq" event={"ID":"c92486fb-6bab-4681-b42a-11b5631b265f","Type":"ContainerDied","Data":"c91a8e896c4e37680e0983957da81be367f9c32a63a4da75e84a99d0e3b57364"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.611808 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-7r467" event={"ID":"27b98baa-da03-46ae-8af3-ca99483f0007","Type":"ContainerDied","Data":"773c01238764a0ed41158b255aa3c581891c36cf9b44486fe6ee3bcd08906b5d"}
Jan 23 07:16:27 crc kubenswrapper[5102]: I0123 07:16:27.930910 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"]
Jan 23 07:16:27 crc kubenswrapper[5102]: W0123 07:16:27.940115 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podac268af7_b49d_40bf_97c8_7abc5ff2bdad.slice/crio-454146c5127d8cddc2ec09c2420088d3298b0f1ed15431d378cd0a37c9c6c4ff WatchSource:0}: Error finding container 454146c5127d8cddc2ec09c2420088d3298b0f1ed15431d378cd0a37c9c6c4ff: Status 404 returned error can't find the container with id 454146c5127d8cddc2ec09c2420088d3298b0f1ed15431d378cd0a37c9c6c4ff
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.147526 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-679f9d9c44-mmp9r" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.147986 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-679f9d9c44-mmp9r" podUID="3d5faa20-dfb4-4dc5-b4e0-21d0fef34cc1" containerName="barbican-api-log" probeResult="failure" output="Get \"http://10.217.0.160:9311/healthcheck\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)"
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.625139 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerStarted","Data":"ff22da91cf223373d458a345f27d72a475d66d4886ff0e810b76172b7cd4967a"}
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.625184 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerStarted","Data":"454146c5127d8cddc2ec09c2420088d3298b0f1ed15431d378cd0a37c9c6c4ff"}
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.917343 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.917998 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-central-agent" containerID="cri-o://7a02c4db745986b10d877c3eb63b6fd963ff2fa984f62d25a3227b1e3f516c49" gracePeriod=30
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.918152 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="proxy-httpd" containerID="cri-o://3ddba53ff6c46c602ed4684962f0cba881c97ccaa05dc9a08f07072573bc3515" gracePeriod=30
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.918196 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="sg-core" containerID="cri-o://3b04b65b8cd8edb865c46578b256807c11c47255c047eb9113a66bdc7991ada1" gracePeriod=30
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.918230 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-notification-agent" containerID="cri-o://f0f61f561ed55e070cb9198ec7807aeb68701fbf61e7d471ee0c376f0af27da1" gracePeriod=30
Jan 23 07:16:28 crc kubenswrapper[5102]: I0123 07:16:28.961964 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="proxy-httpd" probeResult="failure" output="Get \"http://10.217.0.168:3000/\": EOF"
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654526 5102 generic.go:334] "Generic (PLEG): container finished" podID="37bb771d-1ead-44bb-9dac-88dacf382850" containerID="3ddba53ff6c46c602ed4684962f0cba881c97ccaa05dc9a08f07072573bc3515" exitCode=0
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654836 5102 generic.go:334] "Generic (PLEG): container finished" podID="37bb771d-1ead-44bb-9dac-88dacf382850" containerID="3b04b65b8cd8edb865c46578b256807c11c47255c047eb9113a66bdc7991ada1" exitCode=2
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654849 5102 generic.go:334] "Generic (PLEG): container finished" podID="37bb771d-1ead-44bb-9dac-88dacf382850" containerID="7a02c4db745986b10d877c3eb63b6fd963ff2fa984f62d25a3227b1e3f516c49" exitCode=0
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654889 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerDied","Data":"3ddba53ff6c46c602ed4684962f0cba881c97ccaa05dc9a08f07072573bc3515"}
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerDied","Data":"3b04b65b8cd8edb865c46578b256807c11c47255c047eb9113a66bdc7991ada1"}
Jan 23 07:16:29 crc kubenswrapper[5102]: I0123 07:16:29.654941 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerDied","Data":"7a02c4db745986b10d877c3eb63b6fd963ff2fa984f62d25a3227b1e3f516c49"}
Jan 23 07:16:31 crc kubenswrapper[5102]: I0123 07:16:31.046264 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/cinder-scheduler-0"
Jan 23 07:16:34 crc kubenswrapper[5102]: I0123 07:16:34.720781 5102 generic.go:334] "Generic (PLEG): container finished" podID="37bb771d-1ead-44bb-9dac-88dacf382850" containerID="f0f61f561ed55e070cb9198ec7807aeb68701fbf61e7d471ee0c376f0af27da1" exitCode=0
Jan 23 07:16:34 crc kubenswrapper[5102]: I0123 07:16:34.721270 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerDied","Data":"f0f61f561ed55e070cb9198ec7807aeb68701fbf61e7d471ee0c376f0af27da1"}
Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.305100 5102 util.go:48] 
"No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dzhq" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.318596 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-db-create-ppr5f" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.406492 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-5dwdj" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.413590 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rftbg\" (UniqueName: \"kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg\") pod \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.413660 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lnnh2\" (UniqueName: \"kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2\") pod \"c92486fb-6bab-4681-b42a-11b5631b265f\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.413744 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts\") pod \"c92486fb-6bab-4681-b42a-11b5631b265f\" (UID: \"c92486fb-6bab-4681-b42a-11b5631b265f\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.413824 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts\") pod \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\" (UID: \"b72030e8-a814-47ec-b0f2-edd8b146ff7a\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.414968 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "b72030e8-a814-47ec-b0f2-edd8b146ff7a" (UID: "b72030e8-a814-47ec-b0f2-edd8b146ff7a"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.415094 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "c92486fb-6bab-4681-b42a-11b5631b265f" (UID: "c92486fb-6bab-4681-b42a-11b5631b265f"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.417839 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-7r467" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.417951 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2" (OuterVolumeSpecName: "kube-api-access-lnnh2") pod "c92486fb-6bab-4681-b42a-11b5631b265f" (UID: "c92486fb-6bab-4681-b42a-11b5631b265f"). InnerVolumeSpecName "kube-api-access-lnnh2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.419941 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg" (OuterVolumeSpecName: "kube-api-access-rftbg") pod "b72030e8-a814-47ec-b0f2-edd8b146ff7a" (UID: "b72030e8-a814-47ec-b0f2-edd8b146ff7a"). InnerVolumeSpecName "kube-api-access-rftbg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.422894 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.453034 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-89jsr" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515380 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts\") pod \"27b98baa-da03-46ae-8af3-ca99483f0007\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515431 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-658pg\" (UniqueName: \"kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg\") pod \"27b98baa-da03-46ae-8af3-ca99483f0007\" (UID: \"27b98baa-da03-46ae-8af3-ca99483f0007\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515558 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts\") pod \"4806efa3-cb85-4d29-956d-63bf181c16be\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515697 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts\") pod \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515726 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m7mzt\" (UniqueName: \"kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt\") pod \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\" (UID: \"f9dedf34-d437-4ae1-ada2-46f4ad2b0320\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515747 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts\") pod \"d2aadd3e-7281-49ae-88b5-611993646185\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515776 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4cncz\" (UniqueName: \"kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz\") pod \"4806efa3-cb85-4d29-956d-63bf181c16be\" (UID: \"4806efa3-cb85-4d29-956d-63bf181c16be\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.515964 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"kube-api-access-f46dg\" (UniqueName: \"kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg\") pod \"d2aadd3e-7281-49ae-88b5-611993646185\" (UID: \"d2aadd3e-7281-49ae-88b5-611993646185\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.516119 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f9dedf34-d437-4ae1-ada2-46f4ad2b0320" (UID: "f9dedf34-d437-4ae1-ada2-46f4ad2b0320"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.516372 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "4806efa3-cb85-4d29-956d-63bf181c16be" (UID: "4806efa3-cb85-4d29-956d-63bf181c16be"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.516384 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "27b98baa-da03-46ae-8af3-ca99483f0007" (UID: "27b98baa-da03-46ae-8af3-ca99483f0007"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.516501 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d2aadd3e-7281-49ae-88b5-611993646185" (UID: "d2aadd3e-7281-49ae-88b5-611993646185"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517278 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rftbg\" (UniqueName: \"kubernetes.io/projected/b72030e8-a814-47ec-b0f2-edd8b146ff7a-kube-api-access-rftbg\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517310 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lnnh2\" (UniqueName: \"kubernetes.io/projected/c92486fb-6bab-4681-b42a-11b5631b265f-kube-api-access-lnnh2\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517320 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/c92486fb-6bab-4681-b42a-11b5631b265f-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517329 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/27b98baa-da03-46ae-8af3-ca99483f0007-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517338 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/b72030e8-a814-47ec-b0f2-edd8b146ff7a-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517346 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/4806efa3-cb85-4d29-956d-63bf181c16be-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517355 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.517363 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d2aadd3e-7281-49ae-88b5-611993646185-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.519994 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt" (OuterVolumeSpecName: "kube-api-access-m7mzt") pod "f9dedf34-d437-4ae1-ada2-46f4ad2b0320" (UID: "f9dedf34-d437-4ae1-ada2-46f4ad2b0320"). InnerVolumeSpecName "kube-api-access-m7mzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.521685 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg" (OuterVolumeSpecName: "kube-api-access-658pg") pod "27b98baa-da03-46ae-8af3-ca99483f0007" (UID: "27b98baa-da03-46ae-8af3-ca99483f0007"). InnerVolumeSpecName "kube-api-access-658pg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.523227 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.523459 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg" (OuterVolumeSpecName: "kube-api-access-f46dg") pod "d2aadd3e-7281-49ae-88b5-611993646185" (UID: "d2aadd3e-7281-49ae-88b5-611993646185"). InnerVolumeSpecName "kube-api-access-f46dg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.524431 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz" (OuterVolumeSpecName: "kube-api-access-4cncz") pod "4806efa3-cb85-4d29-956d-63bf181c16be" (UID: "4806efa3-cb85-4d29-956d-63bf181c16be"). InnerVolumeSpecName "kube-api-access-4cncz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.617871 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618037 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618089 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zc4qr\" (UniqueName: \"kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618133 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618188 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618284 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618326 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd\") pod \"37bb771d-1ead-44bb-9dac-88dacf382850\" (UID: \"37bb771d-1ead-44bb-9dac-88dacf382850\") " Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618801 5102 reconciler_common.go:293] "Volume detached for 
volume \"kube-api-access-m7mzt\" (UniqueName: \"kubernetes.io/projected/f9dedf34-d437-4ae1-ada2-46f4ad2b0320-kube-api-access-m7mzt\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618831 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4cncz\" (UniqueName: \"kubernetes.io/projected/4806efa3-cb85-4d29-956d-63bf181c16be-kube-api-access-4cncz\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618846 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f46dg\" (UniqueName: \"kubernetes.io/projected/d2aadd3e-7281-49ae-88b5-611993646185-kube-api-access-f46dg\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.618858 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-658pg\" (UniqueName: \"kubernetes.io/projected/27b98baa-da03-46ae-8af3-ca99483f0007-kube-api-access-658pg\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.619798 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.621087 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.625268 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr" (OuterVolumeSpecName: "kube-api-access-zc4qr") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "kube-api-access-zc4qr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.640225 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts" (OuterVolumeSpecName: "scripts") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.664584 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.718802 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721063 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721097 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zc4qr\" (UniqueName: \"kubernetes.io/projected/37bb771d-1ead-44bb-9dac-88dacf382850-kube-api-access-zc4qr\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721110 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721119 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721128 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/37bb771d-1ead-44bb-9dac-88dacf382850-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.721144 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.734879 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data" (OuterVolumeSpecName: "config-data") pod "37bb771d-1ead-44bb-9dac-88dacf382850" (UID: "37bb771d-1ead-44bb-9dac-88dacf382850"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.737134 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-db-create-2dzhq" event={"ID":"c92486fb-6bab-4681-b42a-11b5631b265f","Type":"ContainerDied","Data":"34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.737146 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-db-create-2dzhq" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.737168 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="34630c175128889931f63f201e795256ce73e4954d82e95f02935d427ceb376d" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.738770 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-db-create-ppr5f" event={"ID":"b72030e8-a814-47ec-b0f2-edd8b146ff7a","Type":"ContainerDied","Data":"53422378819b23265b94a6c95a355f2d7c9cce7be8833e63917ce571a074d5b6"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.738807 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="53422378819b23265b94a6c95a355f2d7c9cce7be8833e63917ce571a074d5b6" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.738982 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-db-create-ppr5f" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.740417 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerStarted","Data":"8c97a629f34f855d74c165a346be69cd13786a9b440301da9b3e18c2d09f3c5d"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.740824 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.740875 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.742124 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf" event={"ID":"4806efa3-cb85-4d29-956d-63bf181c16be","Type":"ContainerDied","Data":"7b2a619a0418fcdc6852ca47338f17cb8bdb774bd16d394758ceb41b65dce536"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.742152 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7b2a619a0418fcdc6852ca47338f17cb8bdb774bd16d394758ceb41b65dce536" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.742186 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-wz8lf" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.743517 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-db-create-89jsr" event={"ID":"d2aadd3e-7281-49ae-88b5-611993646185","Type":"ContainerDied","Data":"d21f7f22477557f8a33c6cb558fe805e601bb750452147fada76777f6826920e"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.743603 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d21f7f22477557f8a33c6cb558fe805e601bb750452147fada76777f6826920e" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.743645 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-db-create-89jsr" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.744963 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-7r467" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.745164 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-7r467" event={"ID":"27b98baa-da03-46ae-8af3-ca99483f0007","Type":"ContainerDied","Data":"3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.745189 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="3416eebcad1f47b12a65fcc610e4bed8dc9a91820caa9e4c679c11d95448561d" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.746569 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstackclient" event={"ID":"6d355347-569d-4082-b9fd-66d286ef59be","Type":"ContainerStarted","Data":"f754eb55679f47320849d8ad1be68524f1721681ec6135a0821f6efc11b8ffb1"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.748106 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b157-account-create-update-5dwdj" event={"ID":"f9dedf34-d437-4ae1-ada2-46f4ad2b0320","Type":"ContainerDied","Data":"5709729ee7b8bd603fbf7872333b3b1c8805be0e748ed75f503527bbbaeb52c3"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.748144 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5709729ee7b8bd603fbf7872333b3b1c8805be0e748ed75f503527bbbaeb52c3" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.748342 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-5dwdj" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.750956 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.751865 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"37bb771d-1ead-44bb-9dac-88dacf382850","Type":"ContainerDied","Data":"8949188710d2acefbeeaf2dac877579f491960779f60c555b134c69d2690e9da"} Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.751898 5102 scope.go:117] "RemoveContainer" containerID="3ddba53ff6c46c602ed4684962f0cba881c97ccaa05dc9a08f07072573bc3515" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.751914 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.776100 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podStartSLOduration=9.776077321 podStartE2EDuration="9.776077321s" podCreationTimestamp="2026-01-23 07:16:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:16:35.76319984 +0000 UTC m=+1346.583548815" watchObservedRunningTime="2026-01-23 07:16:35.776077321 +0000 UTC m=+1346.596426296" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.782612 5102 scope.go:117] "RemoveContainer" containerID="3b04b65b8cd8edb865c46578b256807c11c47255c047eb9113a66bdc7991ada1" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.811883 5102 scope.go:117] "RemoveContainer" containerID="f0f61f561ed55e070cb9198ec7807aeb68701fbf61e7d471ee0c376f0af27da1" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.817112 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/openstackclient" podStartSLOduration=2.403002521 podStartE2EDuration="16.817094588s" podCreationTimestamp="2026-01-23 07:16:19 +0000 UTC" firstStartedPulling="2026-01-23 07:16:20.716191288 +0000 UTC m=+1331.536540263" lastFinishedPulling="2026-01-23 07:16:35.130283355 +0000 UTC m=+1345.950632330" observedRunningTime="2026-01-23 07:16:35.810986108 +0000 UTC m=+1346.631335083" watchObservedRunningTime="2026-01-23 07:16:35.817094588 +0000 UTC m=+1346.637443563" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.823067 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/37bb771d-1ead-44bb-9dac-88dacf382850-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.837497 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.850298 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.854650 5102 scope.go:117] "RemoveContainer" containerID="7a02c4db745986b10d877c3eb63b6fd963ff2fa984f62d25a3227b1e3f516c49" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.894387 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895119 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="proxy-httpd" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895144 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="proxy-httpd" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895165 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="sg-core" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895173 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="sg-core" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895197 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f9dedf34-d437-4ae1-ada2-46f4ad2b0320" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895205 5102 state_mem.go:107] 
"Deleted CPUSet assignment" podUID="f9dedf34-d437-4ae1-ada2-46f4ad2b0320" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895228 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4806efa3-cb85-4d29-956d-63bf181c16be" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895236 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4806efa3-cb85-4d29-956d-63bf181c16be" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895275 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b72030e8-a814-47ec-b0f2-edd8b146ff7a" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895284 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b72030e8-a814-47ec-b0f2-edd8b146ff7a" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895302 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2aadd3e-7281-49ae-88b5-611993646185" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895310 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2aadd3e-7281-49ae-88b5-611993646185" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895334 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-central-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895345 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-central-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895361 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c92486fb-6bab-4681-b42a-11b5631b265f" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895368 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="c92486fb-6bab-4681-b42a-11b5631b265f" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895394 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="27b98baa-da03-46ae-8af3-ca99483f0007" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895404 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="27b98baa-da03-46ae-8af3-ca99483f0007" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: E0123 07:16:35.895422 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-notification-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895433 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-notification-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895834 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-central-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895870 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d2aadd3e-7281-49ae-88b5-611993646185" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895893 5102 
memory_manager.go:354] "RemoveStaleState removing state" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="proxy-httpd" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895902 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f9dedf34-d437-4ae1-ada2-46f4ad2b0320" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895915 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="sg-core" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895938 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4806efa3-cb85-4d29-956d-63bf181c16be" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895967 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b72030e8-a814-47ec-b0f2-edd8b146ff7a" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.895988 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" containerName="ceilometer-notification-agent" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.896009 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="c92486fb-6bab-4681-b42a-11b5631b265f" containerName="mariadb-database-create" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.896024 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="27b98baa-da03-46ae-8af3-ca99483f0007" containerName="mariadb-account-create-update" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.899916 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.904010 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.907307 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.928153 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.928220 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.929147 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.929188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sthh8\" (UniqueName: \"kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8\") pod \"ceilometer-0\" (UID: 
\"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.929218 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.929269 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.929328 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:35 crc kubenswrapper[5102]: I0123 07:16:35.930654 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032511 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032587 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032615 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sthh8\" (UniqueName: \"kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032640 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032672 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032708 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.032789 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.033068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.042112 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.053075 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.053779 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.064244 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.070158 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.087734 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sthh8\" (UniqueName: \"kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8\") pod \"ceilometer-0\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") " pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.267223 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.313136 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.766602 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 07:16:36 crc kubenswrapper[5102]: I0123 07:16:36.809777 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:36 crc kubenswrapper[5102]: W0123 07:16:36.810758 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod11312250_a791_46e0_9f42_d1e50cef9b1b.slice/crio-43fa3f1cbb91d47521d2169a1b228463c3b2d0593c734c2e402b2866a6fdd159 WatchSource:0}: Error finding container 43fa3f1cbb91d47521d2169a1b228463c3b2d0593c734c2e402b2866a6fdd159: Status 404 returned error can't find the container with id 43fa3f1cbb91d47521d2169a1b228463c3b2d0593c734c2e402b2866a6fdd159 Jan 23 07:16:37 crc kubenswrapper[5102]: I0123 07:16:37.317507 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:16:37 crc kubenswrapper[5102]: I0123 07:16:37.614255 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="37bb771d-1ead-44bb-9dac-88dacf382850" path="/var/lib/kubelet/pods/37bb771d-1ead-44bb-9dac-88dacf382850/volumes" Jan 23 07:16:37 crc kubenswrapper[5102]: I0123 07:16:37.773829 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerStarted","Data":"43fa3f1cbb91d47521d2169a1b228463c3b2d0593c734c2e402b2866a6fdd159"} Jan 23 07:16:38 crc kubenswrapper[5102]: I0123 07:16:38.086844 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:16:39 crc kubenswrapper[5102]: I0123 07:16:39.792456 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerStarted","Data":"46f81c01a4b8bd3c9573cdb4d2ae5a9407c40f0beb86ad0f8a846615c09a7da2"} Jan 23 07:16:39 crc kubenswrapper[5102]: I0123 07:16:39.793802 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerStarted","Data":"dfaa7bad13e5fac0b6de224f11cc6a6c6dcc24bd72c1f19158d772f115ad4463"} Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.755531 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qvfhk"] Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.756975 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.759683 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-scripts" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.759786 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8n9b4" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.760017 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.772745 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qvfhk"] Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.818187 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerStarted","Data":"48812c7d802be0161ad601f9e7878e7727066561cd389ac77ac2b332a6d43ed6"} Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.836082 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.836136 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.836163 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tklf7\" (UniqueName: \"kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.836396 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.938017 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.938058 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tklf7\" (UniqueName: \"kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 
07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.938125 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.938221 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.943483 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.943734 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.944914 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:40 crc kubenswrapper[5102]: I0123 07:16:40.957683 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tklf7\" (UniqueName: \"kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7\") pod \"nova-cell0-conductor-db-sync-qvfhk\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:41 crc kubenswrapper[5102]: I0123 07:16:41.072526 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:16:41 crc kubenswrapper[5102]: W0123 07:16:41.579588 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod133dbc1b_39f3_41fa_9489_5cd5777f5865.slice/crio-4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674 WatchSource:0}: Error finding container 4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674: Status 404 returned error can't find the container with id 4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674 Jan 23 07:16:41 crc kubenswrapper[5102]: I0123 07:16:41.589184 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qvfhk"] Jan 23 07:16:41 crc kubenswrapper[5102]: I0123 07:16:41.828512 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" event={"ID":"133dbc1b-39f3-41fa-9489-5cd5777f5865","Type":"ContainerStarted","Data":"4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674"} Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.315316 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.870818 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerStarted","Data":"7efda7cff3feac0d8e34a30183bc75976b73a2ec4e8b8f5f42acb545c37cd954"} Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.872931 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-central-agent" containerID="cri-o://dfaa7bad13e5fac0b6de224f11cc6a6c6dcc24bd72c1f19158d772f115ad4463" gracePeriod=30 Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.873026 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.873027 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="proxy-httpd" containerID="cri-o://7efda7cff3feac0d8e34a30183bc75976b73a2ec4e8b8f5f42acb545c37cd954" gracePeriod=30 Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.873068 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="sg-core" containerID="cri-o://48812c7d802be0161ad601f9e7878e7727066561cd389ac77ac2b332a6d43ed6" gracePeriod=30 Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.873112 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-notification-agent" containerID="cri-o://46f81c01a4b8bd3c9573cdb4d2ae5a9407c40f0beb86ad0f8a846615c09a7da2" gracePeriod=30 Jan 23 07:16:42 crc kubenswrapper[5102]: I0123 07:16:42.911094 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.679791352 podStartE2EDuration="7.911070028s" podCreationTimestamp="2026-01-23 07:16:35 +0000 UTC" firstStartedPulling="2026-01-23 07:16:36.813488104 +0000 UTC m=+1347.633837079" lastFinishedPulling="2026-01-23 
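[Editor's note] The startup-latency entry above is internally consistent: podStartE2EDuration is observedRunningTime minus podCreationTimestamp, and podStartSLOduration is that E2E duration minus the time spent pulling images (lastFinishedPulling minus firstStartedPulling). A small Go check that recomputes both values from the logged timestamps; the derivation is an assumption verified by the arithmetic, not a quote from kubelet documentation:

package main

import (
	"fmt"
	"time"
)

func mustParse(s string) time.Time {
	// Layout matches the "+0000 UTC" timestamps in the log entry above.
	t, err := time.Parse("2006-01-02 15:04:05.999999999 -0700 MST", s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-23 07:16:35 +0000 UTC")
	firstPull := mustParse("2026-01-23 07:16:36.813488104 +0000 UTC")
	lastPull := mustParse("2026-01-23 07:16:42.04476678 +0000 UTC")
	running := mustParse("2026-01-23 07:16:42.911070028 +0000 UTC")

	e2e := running.Sub(created)          // 7.911070028s = podStartE2EDuration
	slo := e2e - lastPull.Sub(firstPull) // 2.679791352s = podStartSLOduration
	fmt.Println(e2e, slo)
}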
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882274 5102 generic.go:334] "Generic (PLEG): container finished" podID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerID="7efda7cff3feac0d8e34a30183bc75976b73a2ec4e8b8f5f42acb545c37cd954" exitCode=0
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882306 5102 generic.go:334] "Generic (PLEG): container finished" podID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerID="48812c7d802be0161ad601f9e7878e7727066561cd389ac77ac2b332a6d43ed6" exitCode=2
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882314 5102 generic.go:334] "Generic (PLEG): container finished" podID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerID="46f81c01a4b8bd3c9573cdb4d2ae5a9407c40f0beb86ad0f8a846615c09a7da2" exitCode=0
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882343 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerDied","Data":"7efda7cff3feac0d8e34a30183bc75976b73a2ec4e8b8f5f42acb545c37cd954"}
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882403 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerDied","Data":"48812c7d802be0161ad601f9e7878e7727066561cd389ac77ac2b332a6d43ed6"}
Jan 23 07:16:43 crc kubenswrapper[5102]: I0123 07:16:43.882416 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerDied","Data":"46f81c01a4b8bd3c9573cdb4d2ae5a9407c40f0beb86ad0f8a846615c09a7da2"}
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.405621 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/neutron-795454f649-697pp"
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.474397 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"]
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.474673 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f649ddc48-2nj2r" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-api" containerID="cri-o://ecdec7d2302573b8754f4553efc7bcdaf8c97a42c627b3ecd9ab01aabac92543" gracePeriod=30
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.474718 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-7f649ddc48-2nj2r" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-httpd" containerID="cri-o://40c6f25d79f4df1418343eb41d1ad7f6f123243049310ace1c310db6af2c2917" gracePeriod=30
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.894970 5102 generic.go:334] "Generic (PLEG): container finished" podID="31653b49-9041-436e-a628-9334fab6d8d9" containerID="40c6f25d79f4df1418343eb41d1ad7f6f123243049310ace1c310db6af2c2917" exitCode=0
Jan 23 07:16:44 crc kubenswrapper[5102]: I0123 07:16:44.895026 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerDied","Data":"40c6f25d79f4df1418343eb41d1ad7f6f123243049310ace1c310db6af2c2917"}
Jan 23 07:16:49 crc kubenswrapper[5102]: I0123 07:16:49.960472 5102 generic.go:334] "Generic (PLEG): container finished" podID="31653b49-9041-436e-a628-9334fab6d8d9" containerID="ecdec7d2302573b8754f4553efc7bcdaf8c97a42c627b3ecd9ab01aabac92543" exitCode=0
Jan 23 07:16:49 crc kubenswrapper[5102]: I0123 07:16:49.960658 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerDied","Data":"ecdec7d2302573b8754f4553efc7bcdaf8c97a42c627b3ecd9ab01aabac92543"}
Jan 23 07:16:49 crc kubenswrapper[5102]: I0123 07:16:49.968717 5102 generic.go:334] "Generic (PLEG): container finished" podID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerID="dfaa7bad13e5fac0b6de224f11cc6a6c6dcc24bd72c1f19158d772f115ad4463" exitCode=0
Jan 23 07:16:49 crc kubenswrapper[5102]: I0123 07:16:49.968753 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerDied","Data":"dfaa7bad13e5fac0b6de224f11cc6a6c6dcc24bd72c1f19158d772f115ad4463"}
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.296215 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451204 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sthh8\" (UniqueName: \"kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451284 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451326 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451356 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451450 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451500 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.451519 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data\") pod \"11312250-a791-46e0-9f42-d1e50cef9b1b\" (UID: \"11312250-a791-46e0-9f42-d1e50cef9b1b\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.452954 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.453063 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.465744 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8" (OuterVolumeSpecName: "kube-api-access-sthh8") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "kube-api-access-sthh8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.478746 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts" (OuterVolumeSpecName: "scripts") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.525582 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.553832 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.553859 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-log-httpd\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.553868 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/11312250-a791-46e0-9f42-d1e50cef9b1b-run-httpd\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.553876 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.553885 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sthh8\" (UniqueName: \"kubernetes.io/projected/11312250-a791-46e0-9f42-d1e50cef9b1b-kube-api-access-sthh8\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.554407 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f649ddc48-2nj2r"
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.628890 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data" (OuterVolumeSpecName: "config-data") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.629110 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "11312250-a791-46e0-9f42-d1e50cef9b1b" (UID: "11312250-a791-46e0-9f42-d1e50cef9b1b"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.655112 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle\") pod \"31653b49-9041-436e-a628-9334fab6d8d9\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.655157 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config\") pod \"31653b49-9041-436e-a628-9334fab6d8d9\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.655322 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs\") pod \"31653b49-9041-436e-a628-9334fab6d8d9\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.655357 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config\") pod \"31653b49-9041-436e-a628-9334fab6d8d9\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.655391 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lpz25\" (UniqueName: \"kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25\") pod \"31653b49-9041-436e-a628-9334fab6d8d9\" (UID: \"31653b49-9041-436e-a628-9334fab6d8d9\") "
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.656500 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.656553 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/11312250-a791-46e0-9f42-d1e50cef9b1b-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.680868 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "31653b49-9041-436e-a628-9334fab6d8d9" (UID: "31653b49-9041-436e-a628-9334fab6d8d9"). InnerVolumeSpecName "httpd-config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.683972 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25" (OuterVolumeSpecName: "kube-api-access-lpz25") pod "31653b49-9041-436e-a628-9334fab6d8d9" (UID: "31653b49-9041-436e-a628-9334fab6d8d9"). InnerVolumeSpecName "kube-api-access-lpz25". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.760950 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lpz25\" (UniqueName: \"kubernetes.io/projected/31653b49-9041-436e-a628-9334fab6d8d9-kube-api-access-lpz25\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.761523 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-httpd-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.770583 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "31653b49-9041-436e-a628-9334fab6d8d9" (UID: "31653b49-9041-436e-a628-9334fab6d8d9"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.799986 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "31653b49-9041-436e-a628-9334fab6d8d9" (UID: "31653b49-9041-436e-a628-9334fab6d8d9"). InnerVolumeSpecName "ovndb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.805651 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config" (OuterVolumeSpecName: "config") pod "31653b49-9041-436e-a628-9334fab6d8d9" (UID: "31653b49-9041-436e-a628-9334fab6d8d9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.863135 5102 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-ovndb-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.863175 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.863186 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/31653b49-9041-436e-a628-9334fab6d8d9-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.989687 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-7f649ddc48-2nj2r" event={"ID":"31653b49-9041-436e-a628-9334fab6d8d9","Type":"ContainerDied","Data":"8de16909c1909389452e97e3e2a20f5486bd00f9751d4695b6832aae28c95644"}
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.989741 5102 scope.go:117] "RemoveContainer" containerID="40c6f25d79f4df1418343eb41d1ad7f6f123243049310ace1c310db6af2c2917"
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.990071 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-7f649ddc48-2nj2r"
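[Editor's note] The teardown above follows the same three-stage pattern per volume: "UnmountVolume started" (reconciler_common.go:159), "UnmountVolume.TearDown succeeded" (operation_generator.go:803), then "Volume detached" (reconciler_common.go:293). A schematic Go sketch of that ordering as it appears in the log; the types and function are illustrative, not kubelet's actual implementation:

package main

import "fmt"

type volume struct{ name, podUID string }

// teardown mirrors the three stages logged for each volume of a deleted pod.
func teardown(vols []volume) {
	for _, v := range vols {
		fmt.Printf("UnmountVolume started for volume %q pod %q\n", v.name, v.podUID)
	}
	for _, v := range vols {
		fmt.Printf("UnmountVolume.TearDown succeeded for volume %q\n", v.name)
		fmt.Printf("Volume detached for volume %q on node \"crc\"\n", v.name)
	}
}

func main() {
	teardown([]volume{
		{"scripts", "11312250-a791-46e0-9f42-d1e50cef9b1b"},
		{"config-data", "11312250-a791-46e0-9f42-d1e50cef9b1b"},
	})
}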
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.992217 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" event={"ID":"133dbc1b-39f3-41fa-9489-5cd5777f5865","Type":"ContainerStarted","Data":"da75066d37eacfdc50001d433632728dac882b39846bcaa1807e9ee3518a7bde"}
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.996300 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"11312250-a791-46e0-9f42-d1e50cef9b1b","Type":"ContainerDied","Data":"43fa3f1cbb91d47521d2169a1b228463c3b2d0593c734c2e402b2866a6fdd159"}
Jan 23 07:16:51 crc kubenswrapper[5102]: I0123 07:16:51.996392 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.016742 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" podStartSLOduration=2.525706565 podStartE2EDuration="12.016715907s" podCreationTimestamp="2026-01-23 07:16:40 +0000 UTC" firstStartedPulling="2026-01-23 07:16:41.581941427 +0000 UTC m=+1352.402290402" lastFinishedPulling="2026-01-23 07:16:51.072950759 +0000 UTC m=+1361.893299744" observedRunningTime="2026-01-23 07:16:52.00587547 +0000 UTC m=+1362.826224445" watchObservedRunningTime="2026-01-23 07:16:52.016715907 +0000 UTC m=+1362.837064882"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.056978 5102 scope.go:117] "RemoveContainer" containerID="ecdec7d2302573b8754f4553efc7bcdaf8c97a42c627b3ecd9ab01aabac92543"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.081707 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"]
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.096794 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-7f649ddc48-2nj2r"]
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.101327 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.112169 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.114752 5102 scope.go:117] "RemoveContainer" containerID="7efda7cff3feac0d8e34a30183bc75976b73a2ec4e8b8f5f42acb545c37cd954"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124297 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124709 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-notification-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124726 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-notification-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124737 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124746 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124762 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-api"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124771 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-api"
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124786 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-central-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124793 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-central-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124804 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="sg-core"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124810 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="sg-core"
Jan 23 07:16:52 crc kubenswrapper[5102]: E0123 07:16:52.124820 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="proxy-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124826 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="proxy-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.124987 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-notification-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.125001 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="proxy-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.125012 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="ceilometer-central-agent"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.125020 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-api"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.125029 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" containerName="sg-core"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.125046 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="31653b49-9041-436e-a628-9334fab6d8d9" containerName="neutron-httpd"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.126678 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
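[Editor's note] The RemoveStaleState block above shows the cpu and memory managers purging resource-allocation state for containers of pods that no longer exist (the old ceilometer-0 and neutron UIDs) when the replacement ceilometer-0 pod is ADDed. A schematic illustration of that pattern; the data structures are illustrative, not kubelet's:

package main

import "fmt"

// removeStaleState drops per-container state for pods that are no longer
// active, mirroring the "RemoveStaleState" / "Deleted CPUSet assignment"
// pairs in the log above.
func removeStaleState(state map[string][]string, activePods map[string]bool) {
	for podUID, containers := range state {
		if activePods[podUID] {
			continue
		}
		for _, c := range containers {
			fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", podUID, c)
		}
		delete(state, podUID) // analogous to deleting the CPUSet assignment
	}
}

func main() {
	state := map[string][]string{
		"11312250-a791-46e0-9f42-d1e50cef9b1b": {"sg-core", "proxy-httpd"},
	}
	active := map[string]bool{"13aa228b-3510-4424-b9dd-e9745a2420e6": true}
	removeStaleState(state, active)
}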
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.130997 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.131215 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.134033 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.145843 5102 scope.go:117] "RemoveContainer" containerID="48812c7d802be0161ad601f9e7878e7727066561cd389ac77ac2b332a6d43ed6"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.167891 5102 scope.go:117] "RemoveContainer" containerID="46f81c01a4b8bd3c9573cdb4d2ae5a9407c40f0beb86ad0f8a846615c09a7da2"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.189752 5102 scope.go:117] "RemoveContainer" containerID="dfaa7bad13e5fac0b6de224f11cc6a6c6dcc24bd72c1f19158d772f115ad4463"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.272570 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.272711 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.272761 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.273257 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.273378 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l5dwh\" (UniqueName: \"kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.273452 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.273518 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.374717 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.374785 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.374804 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.374825 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.374841 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.375685 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.375714 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.375926 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l5dwh\" (UniqueName: \"kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.376286 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.380164 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.380284 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.380472 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.392218 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.395281 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l5dwh\" (UniqueName: \"kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh\") pod \"ceilometer-0\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.445501 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0"
Jan 23 07:16:52 crc kubenswrapper[5102]: W0123 07:16:52.957137 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod13aa228b_3510_4424_b9dd_e9745a2420e6.slice/crio-18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66 WatchSource:0}: Error finding container 18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66: Status 404 returned error can't find the container with id 18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66
Jan 23 07:16:52 crc kubenswrapper[5102]: I0123 07:16:52.957685 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.029917 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerStarted","Data":"18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66"}
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.432608 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"]
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.609837 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="11312250-a791-46e0-9f42-d1e50cef9b1b" path="/var/lib/kubelet/pods/11312250-a791-46e0-9f42-d1e50cef9b1b/volumes"
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.610643 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31653b49-9041-436e-a628-9334fab6d8d9" path="/var/lib/kubelet/pods/31653b49-9041-436e-a628-9334fab6d8d9/volumes"
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.878846 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.879409 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-log" containerID="cri-o://b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151" gracePeriod=30
Jan 23 07:16:53 crc kubenswrapper[5102]: I0123 07:16:53.879637 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-httpd" containerID="cri-o://0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21" gracePeriod=30
Jan 23 07:16:54 crc kubenswrapper[5102]: I0123 07:16:54.045179 5102 generic.go:334] "Generic (PLEG): container finished" podID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerID="b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151" exitCode=143
Jan 23 07:16:54 crc kubenswrapper[5102]: I0123 07:16:54.045286 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerDied","Data":"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"}
Jan 23 07:16:54 crc kubenswrapper[5102]: I0123 07:16:54.047224 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerStarted","Data":"56868845e6ca22358664384702fa4ce1246131afbda19ddf10daab40d90a98ba"}
Jan 23 07:16:55 crc kubenswrapper[5102]: I0123 07:16:55.848862 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:16:55 crc kubenswrapper[5102]: I0123 07:16:55.849635 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-log" containerID="cri-o://c176635075d963258985742562876369611ebdde0a76ef4ae19d6651aea95d35" gracePeriod=30
Jan 23 07:16:55 crc kubenswrapper[5102]: I0123 07:16:55.849763 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-httpd" containerID="cri-o://892105c146ee06febdf3ffc361005cc973d3bb16201b390dd4dfdc6f24ca8ed0" gracePeriod=30
Jan 23 07:16:56 crc kubenswrapper[5102]: I0123 07:16:56.072714 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerStarted","Data":"ffc580056cbc54142fc39afab9eb0c2dcaaec5bbb399deaf0929c56ddb885f2f"}
Jan 23 07:16:56 crc kubenswrapper[5102]: I0123 07:16:56.076358 5102 generic.go:334] "Generic (PLEG): container finished" podID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerID="c176635075d963258985742562876369611ebdde0a76ef4ae19d6651aea95d35" exitCode=143
Jan 23 07:16:56 crc kubenswrapper[5102]: I0123 07:16:56.076421 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerDied","Data":"c176635075d963258985742562876369611ebdde0a76ef4ae19d6651aea95d35"}
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.088938 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerStarted","Data":"db0b5adeb2a555272b671c5a064386733799331b44521a212eb8c2b6d8db207a"}
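[Editor's note] The exitCode values above follow the usual Unix convention: a code above 128 means the process died from signal (code - 128), so exitCode=143 for the glance-log containers is 128 + SIGTERM(15), the expected outcome of the graceful "Killing container with a grace period" stops, while exitCode=0 is a clean exit. A small Go decoder illustrating the convention:

package main

import (
	"fmt"
	"syscall"
)

// describeExit interprets a container exit code per the 128+signal convention.
func describeExit(code int) string {
	if code > 128 {
		sig := syscall.Signal(code - 128)
		return fmt.Sprintf("terminated by signal %d (%v)", code-128, sig)
	}
	if code == 0 {
		return "exited cleanly"
	}
	return fmt.Sprintf("exited with error code %d", code)
}

func main() {
	for _, c := range []int{143, 0, 2} {
		fmt.Printf("exitCode=%d: %s\n", c, describeExit(c))
	}
}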
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.574339 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687131 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687217 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687326 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687365 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687418 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687448 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wdcqn\" (UniqueName: \"kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687708 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.687773 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle\") pod \"10554b39-ce02-4ee0-ba52-9e54f14065ad\" (UID: \"10554b39-ce02-4ee0-ba52-9e54f14065ad\") "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.688098 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.688212 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs" (OuterVolumeSpecName: "logs") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.688565 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.688594 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/10554b39-ce02-4ee0-ba52-9e54f14065ad-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.702185 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.705685 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts" (OuterVolumeSpecName: "scripts") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.712790 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn" (OuterVolumeSpecName: "kube-api-access-wdcqn") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "kube-api-access-wdcqn". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.800226 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.800268 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" "
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.800279 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wdcqn\" (UniqueName: \"kubernetes.io/projected/10554b39-ce02-4ee0-ba52-9e54f14065ad-kube-api-access-wdcqn\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.842761 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data" (OuterVolumeSpecName: "config-data") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.867342 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc"
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.875750 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.901658 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.901685 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.901695 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:57 crc kubenswrapper[5102]: I0123 07:16:57.906653 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "10554b39-ce02-4ee0-ba52-9e54f14065ad" (UID: "10554b39-ce02-4ee0-ba52-9e54f14065ad"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.002918 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/10554b39-ce02-4ee0-ba52-9e54f14065ad-public-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.098308 5102 generic.go:334] "Generic (PLEG): container finished" podID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerID="0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21" exitCode=0
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.098389 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.098388 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerDied","Data":"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"}
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.098448 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"10554b39-ce02-4ee0-ba52-9e54f14065ad","Type":"ContainerDied","Data":"c480f251b694bffcbb907c68fef63823aa9c08b64fa91b755fe32e8aef4ed094"}
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.098473 5102 scope.go:117] "RemoveContainer" containerID="0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.107787 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerStarted","Data":"edb6be1ebb76f4f9ff7af6fb07ea4a47e81a16d609e59271fddd77fb1ec66f5f"}
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.107956 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-central-agent" containerID="cri-o://56868845e6ca22358664384702fa4ce1246131afbda19ddf10daab40d90a98ba" gracePeriod=30
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.108110 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.108412 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="proxy-httpd" containerID="cri-o://edb6be1ebb76f4f9ff7af6fb07ea4a47e81a16d609e59271fddd77fb1ec66f5f" gracePeriod=30
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.108574 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="sg-core" containerID="cri-o://db0b5adeb2a555272b671c5a064386733799331b44521a212eb8c2b6d8db207a" gracePeriod=30
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.108612 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-notification-agent" containerID="cri-o://ffc580056cbc54142fc39afab9eb0c2dcaaec5bbb399deaf0929c56ddb885f2f" gracePeriod=30
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.136217 5102 scope.go:117] "RemoveContainer" containerID="b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.142494 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=1.271901816 podStartE2EDuration="6.142473347s" podCreationTimestamp="2026-01-23 07:16:52 +0000 UTC" firstStartedPulling="2026-01-23 07:16:52.959911487 +0000 UTC m=+1363.780260472" lastFinishedPulling="2026-01-23 07:16:57.830483028 +0000 UTC m=+1368.650832003" observedRunningTime="2026-01-23 07:16:58.130123099 +0000 UTC m=+1368.950472074" watchObservedRunningTime="2026-01-23 07:16:58.142473347 +0000 UTC m=+1368.962822322"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.154734 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.164468 5102 scope.go:117] "RemoveContainer" containerID="0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"
Jan 23 07:16:58 crc kubenswrapper[5102]: E0123 07:16:58.164849 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21\": container with ID starting with 0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21 not found: ID does not exist" containerID="0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.164878 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"} err="failed to get container status \"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21\": rpc error: code = NotFound desc = could not find container \"0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21\": container with ID starting with 0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21 not found: ID does not exist"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.164902 5102 scope.go:117] "RemoveContainer" containerID="b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"
Jan 23 07:16:58 crc kubenswrapper[5102]: E0123 07:16:58.165268 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151\": container with ID starting with b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151 not found: ID does not exist" containerID="b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.165298 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151"} err="failed to get container status \"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151\": rpc error: code = NotFound desc = could not find container \"b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151\": container with ID starting with b4771afc31b96ce8b13c36ad51b6e63c199636ac3cd839c645e954d564ee8151 not found: ID does not exist"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.167619 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.194045 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:16:58 crc kubenswrapper[5102]: E0123 07:16:58.194467 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-log"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.194483 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-log"
Jan 23 07:16:58 crc kubenswrapper[5102]: E0123 07:16:58.194517 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-httpd"
Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.194524 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-httpd"
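[Editor's note] The E-level "ContainerStatus from runtime service failed ... code = NotFound" entries above, paired with "DeleteContainer returned error", are a benign race: the container had already been removed by the time the deletor re-queried its status. A minimal Go sketch for classifying those lines as benign when scanning a log like this one; the heuristic is an assumption, not a kubelet-provided check:

package main

import (
	"fmt"
	"strings"
)

// isBenignNotFound flags status-lookup failures for already-removed containers.
func isBenignNotFound(line string) bool {
	return strings.Contains(line, "ContainerStatus from runtime service failed") &&
		strings.Contains(line, "code = NotFound")
}

func main() {
	line := `E0123 07:16:58.164849 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container ..."`
	fmt.Println(isBenignNotFound(line)) // true
}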
kubenswrapper[5102]: I0123 07:16:58.194524 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-httpd" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.194715 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-httpd" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.194735 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" containerName="glance-log" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.195662 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.200639 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-external-config-data" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.200826 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-public-svc" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.204373 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308069 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308117 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308175 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308300 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308465 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308625 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l2wmg\" (UniqueName: 
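The entries above follow klog's single-line layout: severity letter, MMDD date, wall time with microseconds, PID, source file:line, then a structured message. A minimal Go sketch (not part of the kubelet; the regexp is our own, written to match the lines visible in this log) that splits such a line:

package main

import (
	"fmt"
	"regexp"
)

// klogRE mirrors the layout seen above: severity, MMDD, time with
// microseconds, PID, source file:line, then the message body.
var klogRE = regexp.MustCompile(`^([IWEF])(\d{4}) (\d{2}:\d{2}:\d{2}\.\d{6})\s+(\d+) ([\w./-]+:\d+)\] (.*)$`)

func main() {
	line := `I0123 07:16:58.164468 5102 scope.go:117] "RemoveContainer" containerID="0247b4850ae720471955d4a547f6d136f1672bb1f63f0d9d87214bf35dc5ae21"`
	if m := klogRE.FindStringSubmatch(line); m != nil {
		fmt.Printf("severity=%s date=%s time=%s pid=%s source=%s\nmsg=%s\n",
			m[1], m[2], m[3], m[4], m[5], m[6])
	}
}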
\"kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308699 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.308895 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410619 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410701 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l2wmg\" (UniqueName: \"kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410731 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410782 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410813 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410835 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410877 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data\") pod 
\"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.410909 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.411457 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.412314 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") device mount path \"/mnt/openstack/pv11\"" pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.415020 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.417954 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.421269 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.421754 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.433491 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.441675 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l2wmg\" (UniqueName: \"kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " 
pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.453096 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"glance-default-external-api-0\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " pod="openstack/glance-default-external-api-0" Jan 23 07:16:58 crc kubenswrapper[5102]: I0123 07:16:58.538384 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.121626 5102 generic.go:334] "Generic (PLEG): container finished" podID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerID="892105c146ee06febdf3ffc361005cc973d3bb16201b390dd4dfdc6f24ca8ed0" exitCode=0 Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.121882 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerDied","Data":"892105c146ee06febdf3ffc361005cc973d3bb16201b390dd4dfdc6f24ca8ed0"} Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.134754 5102 generic.go:334] "Generic (PLEG): container finished" podID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerID="db0b5adeb2a555272b671c5a064386733799331b44521a212eb8c2b6d8db207a" exitCode=2 Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.135014 5102 generic.go:334] "Generic (PLEG): container finished" podID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerID="ffc580056cbc54142fc39afab9eb0c2dcaaec5bbb399deaf0929c56ddb885f2f" exitCode=0 Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.135111 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerDied","Data":"db0b5adeb2a555272b671c5a064386733799331b44521a212eb8c2b6d8db207a"} Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.135197 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerDied","Data":"ffc580056cbc54142fc39afab9eb0c2dcaaec5bbb399deaf0929c56ddb885f2f"} Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.153670 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:16:59 crc kubenswrapper[5102]: W0123 07:16:59.157685 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod6c3459b4_efed_4868_8fd0_ffeb07f0100d.slice/crio-7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7 WatchSource:0}: Error finding container 7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7: Status 404 returned error can't find the container with id 7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7 Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.442423 5102 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.442423 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532672 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532758 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tvbc6\" (UniqueName: \"kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532815 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532887 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532962 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.532991 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.533012 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.533038 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs\") pod \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\" (UID: \"35f0a765-fbac-4583-ade4-5ecb8d6d3264\") "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.534172 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.539173 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6" (OuterVolumeSpecName: "kube-api-access-tvbc6") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "kube-api-access-tvbc6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.539959 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts" (OuterVolumeSpecName: "scripts") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.540023 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs" (OuterVolumeSpecName: "logs") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.543508 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.560394 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.608439 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data" (OuterVolumeSpecName: "config-data") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.618836 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "35f0a765-fbac-4583-ade4-5ecb8d6d3264" (UID: "35f0a765-fbac-4583-ade4-5ecb8d6d3264"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.627179 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="10554b39-ce02-4ee0-ba52-9e54f14065ad" path="/var/lib/kubelet/pods/10554b39-ce02-4ee0-ba52-9e54f14065ad/volumes"
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640415 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tvbc6\" (UniqueName: \"kubernetes.io/projected/35f0a765-fbac-4583-ade4-5ecb8d6d3264-kube-api-access-tvbc6\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640448 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-httpd-run\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640459 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640467 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640476 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640484 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35f0a765-fbac-4583-ade4-5ecb8d6d3264-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640491 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35f0a765-fbac-4583-ade4-5ecb8d6d3264-internal-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.640514 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" "
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.662419 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc"
Jan 23 07:16:59 crc kubenswrapper[5102]: I0123 07:16:59.742960 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.145603 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerStarted","Data":"107c5f3d9db926ad82c1955c3fd0cba07ea73b8de197843699ad0edcedf0354b"}
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.146869 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerStarted","Data":"7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7"}
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.150077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"35f0a765-fbac-4583-ade4-5ecb8d6d3264","Type":"ContainerDied","Data":"91f6632c5ebfcbf1dfd15e1801623e834944f0f73a77ff987834985e15a4d1ac"}
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.150113 5102 scope.go:117] "RemoveContainer" containerID="892105c146ee06febdf3ffc361005cc973d3bb16201b390dd4dfdc6f24ca8ed0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.150282 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.176267 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.190884 5102 scope.go:117] "RemoveContainer" containerID="c176635075d963258985742562876369611ebdde0a76ef4ae19d6651aea95d35"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.197809 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.283457 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:17:00 crc kubenswrapper[5102]: E0123 07:17:00.283914 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-httpd"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.283945 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-httpd"
Jan 23 07:17:00 crc kubenswrapper[5102]: E0123 07:17:00.283962 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-log"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.283969 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-log"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.284172 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-httpd"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.284196 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" containerName="glance-log"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.288163 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.291136 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-glance-default-internal-svc"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.291304 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"glance-default-internal-config-data"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.291475 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"]
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485547 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hh95l\" (UniqueName: \"kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485608 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485635 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485656 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485756 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485893 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.485966 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0"
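The "RemoveStaleState: removing container" errors above fire when the CPU and memory managers still hold per-container state for a pod UID that no longer exists (the deleted glance pod was replaced under a new UID). A hedged sketch of that cleanup pattern; the data layout here is invented for illustration and is not the kubelet's checkpoint format:

package main

import "fmt"

func main() {
	// podUID -> containerName -> assigned CPU set (string for brevity).
	assignments := map[string]map[string]string{
		"35f0a765-fbac-4583-ade4-5ecb8d6d3264": {
			"glance-httpd": "", // besteffort containers hold no exclusive CPUs
			"glance-log":   "",
		},
	}
	admitted := map[string]bool{} // the old pod UID is gone after SyncLoop REMOVE
	for uid, byName := range assignments {
		if !admitted[uid] {
			for name := range byName {
				fmt.Printf("RemoveStaleState: removing container podUID=%q containerName=%q\n", uid, name)
			}
			delete(assignments, uid)
		}
	}
}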
"operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588057 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588131 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588154 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588181 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588228 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hh95l\" (UniqueName: \"kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588263 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588284 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588300 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.588881 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: 
\"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.593526 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.594479 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.594785 5102 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") device mount path \"/mnt/openstack/pv06\"" pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.596927 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.612552 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.617350 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.628478 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hh95l\" (UniqueName: \"kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.640333 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"glance-default-internal-api-0\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " pod="openstack/glance-default-internal-api-0" Jan 23 07:17:00 crc kubenswrapper[5102]: I0123 07:17:00.919629 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:17:01 crc kubenswrapper[5102]: W0123 07:17:01.477690 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod302ce3d2_72f6_429c_b3cb_16e8fba0d04e.slice/crio-1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512 WatchSource:0}: Error finding container 1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512: Status 404 returned error can't find the container with id 1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512 Jan 23 07:17:01 crc kubenswrapper[5102]: I0123 07:17:01.479036 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:17:01 crc kubenswrapper[5102]: I0123 07:17:01.620229 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35f0a765-fbac-4583-ade4-5ecb8d6d3264" path="/var/lib/kubelet/pods/35f0a765-fbac-4583-ade4-5ecb8d6d3264/volumes" Jan 23 07:17:02 crc kubenswrapper[5102]: I0123 07:17:02.172416 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerStarted","Data":"1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512"} Jan 23 07:17:03 crc kubenswrapper[5102]: I0123 07:17:03.183078 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerStarted","Data":"d16b0a4419002db2415cab085fc8a5390ea935e4fea5424b97b0f8ead9c68fef"} Jan 23 07:17:03 crc kubenswrapper[5102]: I0123 07:17:03.188590 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerStarted","Data":"4f979b76f22ef2e8f8509c19caa21930b8a908a0e7b25aba0b15129e8e286021"} Jan 23 07:17:03 crc kubenswrapper[5102]: I0123 07:17:03.188638 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerStarted","Data":"868d87cea06d5b9482b8147a33f18e2828a731b3e1fb46272675463a760abf4f"} Jan 23 07:17:03 crc kubenswrapper[5102]: I0123 07:17:03.211342 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-external-api-0" podStartSLOduration=5.21132377 podStartE2EDuration="5.21132377s" podCreationTimestamp="2026-01-23 07:16:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:03.204346402 +0000 UTC m=+1374.024695387" watchObservedRunningTime="2026-01-23 07:17:03.21132377 +0000 UTC m=+1374.031672755" Jan 23 07:17:03 crc kubenswrapper[5102]: I0123 07:17:03.229068 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/glance-default-internal-api-0" podStartSLOduration=3.229045705 podStartE2EDuration="3.229045705s" podCreationTimestamp="2026-01-23 07:17:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:03.2282623 +0000 UTC m=+1374.048611295" watchObservedRunningTime="2026-01-23 07:17:03.229045705 +0000 UTC m=+1374.049394680" Jan 23 07:17:05 crc kubenswrapper[5102]: I0123 07:17:05.215137 5102 generic.go:334] "Generic (PLEG): container finished" 
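In the two "Observed pod startup duration" entries above, the zero firstStartedPulling/lastFinishedPulling values mean no image pull contributed, and podStartSLOduration lines up with watchObservedRunningTime minus podCreationTimestamp. A worked check in Go using the glance-default-external-api-0 numbers copied from the log:

package main

import (
	"fmt"
	"time"
)

func main() {
	created, err := time.Parse("2006-01-02 15:04:05 -0700 MST", "2026-01-23 07:16:58 +0000 UTC")
	if err != nil {
		panic(err)
	}
	observed, err := time.Parse("2006-01-02 15:04:05.00000000 -0700 MST", "2026-01-23 07:17:03.21132377 +0000 UTC")
	if err != nil {
		panic(err)
	}
	fmt.Println(observed.Sub(created)) // 5.21132377s, matching podStartSLOduration
}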
podID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerID="56868845e6ca22358664384702fa4ce1246131afbda19ddf10daab40d90a98ba" exitCode=0 Jan 23 07:17:05 crc kubenswrapper[5102]: I0123 07:17:05.215236 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerDied","Data":"56868845e6ca22358664384702fa4ce1246131afbda19ddf10daab40d90a98ba"} Jan 23 07:17:07 crc kubenswrapper[5102]: I0123 07:17:07.238517 5102 generic.go:334] "Generic (PLEG): container finished" podID="133dbc1b-39f3-41fa-9489-5cd5777f5865" containerID="da75066d37eacfdc50001d433632728dac882b39846bcaa1807e9ee3518a7bde" exitCode=0 Jan 23 07:17:07 crc kubenswrapper[5102]: I0123 07:17:07.238566 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" event={"ID":"133dbc1b-39f3-41fa-9489-5cd5777f5865","Type":"ContainerDied","Data":"da75066d37eacfdc50001d433632728dac882b39846bcaa1807e9ee3518a7bde"} Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.539445 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.539789 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-external-api-0" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.626890 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.653383 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.723354 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-external-api-0" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.752930 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data\") pod \"133dbc1b-39f3-41fa-9489-5cd5777f5865\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.752986 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts\") pod \"133dbc1b-39f3-41fa-9489-5cd5777f5865\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.753188 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tklf7\" (UniqueName: \"kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7\") pod \"133dbc1b-39f3-41fa-9489-5cd5777f5865\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.753236 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle\") pod \"133dbc1b-39f3-41fa-9489-5cd5777f5865\" (UID: \"133dbc1b-39f3-41fa-9489-5cd5777f5865\") " Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.762945 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts" 
(OuterVolumeSpecName: "scripts") pod "133dbc1b-39f3-41fa-9489-5cd5777f5865" (UID: "133dbc1b-39f3-41fa-9489-5cd5777f5865"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.764747 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7" (OuterVolumeSpecName: "kube-api-access-tklf7") pod "133dbc1b-39f3-41fa-9489-5cd5777f5865" (UID: "133dbc1b-39f3-41fa-9489-5cd5777f5865"). InnerVolumeSpecName "kube-api-access-tklf7". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.780028 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "133dbc1b-39f3-41fa-9489-5cd5777f5865" (UID: "133dbc1b-39f3-41fa-9489-5cd5777f5865"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.786286 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data" (OuterVolumeSpecName: "config-data") pod "133dbc1b-39f3-41fa-9489-5cd5777f5865" (UID: "133dbc1b-39f3-41fa-9489-5cd5777f5865"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.856761 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tklf7\" (UniqueName: \"kubernetes.io/projected/133dbc1b-39f3-41fa-9489-5cd5777f5865-kube-api-access-tklf7\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.856794 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.856804 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:08 crc kubenswrapper[5102]: I0123 07:17:08.856813 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/133dbc1b-39f3-41fa-9489-5cd5777f5865-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.260579 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.260653 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-db-sync-qvfhk" event={"ID":"133dbc1b-39f3-41fa-9489-5cd5777f5865","Type":"ContainerDied","Data":"4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674"} Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.260691 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4f726959b83e78e5163ea39d8ec1d258b9c253dedb3465218a88244dc74df674" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.261631 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.261689 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-external-api-0" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.482431 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 07:17:09 crc kubenswrapper[5102]: E0123 07:17:09.483266 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="133dbc1b-39f3-41fa-9489-5cd5777f5865" containerName="nova-cell0-conductor-db-sync" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.483314 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="133dbc1b-39f3-41fa-9489-5cd5777f5865" containerName="nova-cell0-conductor-db-sync" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.483785 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="133dbc1b-39f3-41fa-9489-5cd5777f5865" containerName="nova-cell0-conductor-db-sync" Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.485140 5102 util.go:30] "No sandbox for pod can be found. 
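The probe lines above trace the usual progression for the glance pods: the startup probe reports unhealthy, then started; only after that does the readiness probe begin reporting, first as an empty (unknown) status and later as ready. A sketch of that ordering as data (statuses copied from the log; the sequencing is inferred from the log itself, not taken from kubelet source):

package main

import "fmt"

func main() {
	// Observed order of "SyncLoop (probe)" statuses for one glance pod.
	transitions := []struct{ probe, status string }{
		{"startup", "unhealthy"},
		{"startup", "started"},
		{"readiness", ""}, // unknown until the first readiness result
		{"readiness", "ready"},
	}
	for _, t := range transitions {
		fmt.Printf("SyncLoop (probe) probe=%q status=%q\n", t.probe, t.status)
	}
}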
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.485140 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.491409 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8n9b4"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.492144 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.496059 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.568835 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.568906 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bcwvv\" (UniqueName: \"kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.568975 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.670650 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.670719 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bcwvv\" (UniqueName: \"kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.670771 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.673773 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-conductor-config-data"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.678277 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.686018 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.691733 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bcwvv\" (UniqueName: \"kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv\") pod \"nova-cell0-conductor-0\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.818656 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-nova-dockercfg-8n9b4"
Jan 23 07:17:09 crc kubenswrapper[5102]: I0123 07:17:09.827427 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:10 crc kubenswrapper[5102]: I0123 07:17:10.401912 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-conductor-0"]
Jan 23 07:17:10 crc kubenswrapper[5102]: I0123 07:17:10.920133 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:10 crc kubenswrapper[5102]: I0123 07:17:10.920188 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:10 crc kubenswrapper[5102]: I0123 07:17:10.955991 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:10 crc kubenswrapper[5102]: I0123 07:17:10.970810 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.279560 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1893371f-b289-4336-a8ed-1bd78e9191b6","Type":"ContainerStarted","Data":"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605"}
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.279624 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1893371f-b289-4336-a8ed-1bd78e9191b6","Type":"ContainerStarted","Data":"3947eb90e2166603284232579a4b0679a135b85e23b7fee918e28d6ab6d01729"}
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.280250 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.280304 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.280320 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.281138 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.281234 5102 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.283724 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-external-api-0"
Jan 23 07:17:11 crc kubenswrapper[5102]: I0123 07:17:11.321377 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-conductor-0" podStartSLOduration=2.321350292 podStartE2EDuration="2.321350292s" podCreationTimestamp="2026-01-23 07:17:09 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:11.303919257 +0000 UTC m=+1382.124268232" watchObservedRunningTime="2026-01-23 07:17:11.321350292 +0000 UTC m=+1382.141699277"
Jan 23 07:17:13 crc kubenswrapper[5102]: I0123 07:17:13.150797 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:13 crc kubenswrapper[5102]: I0123 07:17:13.155074 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/glance-default-internal-api-0"
Jan 23 07:17:19 crc kubenswrapper[5102]: I0123 07:17:19.873783 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell0-conductor-0"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.544758 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-cell-mapping-7sv4q"]
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.546206 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7sv4q"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.548445 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-scripts"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.548569 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-manage-config-data"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.558497 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7sv4q"]
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.601832 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.602192 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bc6xd\" (UniqueName: \"kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q"
Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.602235 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q"
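The event={...} payloads in the PLEG lines are valid JSON, so they can be pulled out of a log mechanically. A sketch decoding one with encoding/json (the struct is ours, defined only for this example; the field names just match the logged keys):

package main

import (
	"encoding/json"
	"fmt"
)

// syncLoopEvent mirrors the keys of the logged event={...} payload.
type syncLoopEvent struct {
	ID   string
	Type string
	Data string
}

func main() {
	payload := `{"ID":"1893371f-b289-4336-a8ed-1bd78e9191b6","Type":"ContainerStarted","Data":"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605"}`
	var ev syncLoopEvent
	if err := json.Unmarshal([]byte(payload), &ev); err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s\n", ev.Type, ev.Data[:12])
}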
pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.704431 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.704495 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bc6xd\" (UniqueName: \"kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.704554 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.704643 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.739582 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-combined-ca-bundle\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.752327 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.764983 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.793204 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bc6xd\" (UniqueName: \"kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd\") pod \"nova-cell0-cell-mapping-7sv4q\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.846419 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.868656 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.869784 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.879836 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.910270 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.928804 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.928901 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.929001 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l9tsq\" (UniqueName: \"kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.929039 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.963630 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.965259 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:20 crc kubenswrapper[5102]: I0123 07:17:20.977961 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.035585 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l9tsq\" (UniqueName: \"kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.036294 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.036424 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.036462 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.039509 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.043427 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.056628 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.065704 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.083123 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-l9tsq\" (UniqueName: \"kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq\") pod \"nova-api-0\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.102592 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.103837 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.113909 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.139316 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.139624 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.139717 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9bzvk\" (UniqueName: \"kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.139749 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.139835 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.195337 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.196499 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.204453 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.215002 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.241958 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242024 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p2922\" (UniqueName: \"kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242058 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9bzvk\" (UniqueName: \"kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242080 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242131 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242159 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242198 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.242594 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.255215 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.257783 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:17:21 crc kubenswrapper[5102]: 
I0123 07:17:21.261812 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.266336 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.285663 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.293508 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9bzvk\" (UniqueName: \"kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk\") pod \"nova-metadata-0\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.313054 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.327046 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.343915 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tg7s\" (UniqueName: \"kubernetes.io/projected/18efcf1f-9c02-45d3-bf31-455615a550fc-kube-api-access-6tg7s\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.343979 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.344014 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.344374 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p2922\" (UniqueName: \"kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.344399 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.344443 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-config-data\") pod 
\"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.351025 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.354019 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.366326 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p2922\" (UniqueName: \"kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922\") pod \"nova-scheduler-0\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449595 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449684 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jjd5l\" (UniqueName: \"kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449739 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449765 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449796 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449821 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " 
pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449846 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449865 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tg7s\" (UniqueName: \"kubernetes.io/projected/18efcf1f-9c02-45d3-bf31-455615a550fc-kube-api-access-6tg7s\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.449880 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.462252 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.465017 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.465217 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.468164 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6tg7s\" (UniqueName: \"kubernetes.io/projected/18efcf1f-9c02-45d3-bf31-455615a550fc-kube-api-access-6tg7s\") pod \"nova-cell1-novncproxy-0\" (UID: \"18efcf1f-9c02-45d3-bf31-455615a550fc\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.527000 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552572 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552670 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jjd5l\" (UniqueName: \"kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552722 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552755 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552795 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.552817 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.553557 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.561128 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.563095 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 
07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.563312 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.563408 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.584483 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jjd5l\" (UniqueName: \"kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l\") pod \"dnsmasq-dns-647df7b8c5-8cnll\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.599979 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.652067 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-cell-mapping-7sv4q"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.840433 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.857162 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.995780 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vcbp6"] Jan 23 07:17:21 crc kubenswrapper[5102]: I0123 07:17:21.997337 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.001903 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-scripts" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.005722 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.019630 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vcbp6"] Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.047495 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.160673 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.162845 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.162911 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.163364 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.163415 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mk4t8\" (UniqueName: \"kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.243843 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.264870 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.265015 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc 
kubenswrapper[5102]: I0123 07:17:22.265054 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mk4t8\" (UniqueName: \"kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.265143 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.269401 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.273018 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.274093 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.288104 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mk4t8\" (UniqueName: \"kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8\") pod \"nova-cell1-conductor-db-sync-vcbp6\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.330372 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.514128 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" event={"ID":"e298e269-6676-4a09-8bb3-7fb0ad38b62e","Type":"ContainerStarted","Data":"aa9b7bc6b516c40ca49d565a8331c7bfb2b1673b96e739af53910207402e5b69"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.520827 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17fc342d-0e6c-45f3-b623-4018faf20020","Type":"ContainerStarted","Data":"f99b46d31f0aa251d2f4d1e3d8f697c0b9cd665573a11e14bbf34f3549d7eea9"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.524094 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerStarted","Data":"1247d7c40fe068741371b4af2fa3b4cfd6807cbb39ff5d8f2a27ab4cbbf702fc"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.540791 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"18efcf1f-9c02-45d3-bf31-455615a550fc","Type":"ContainerStarted","Data":"779a566c38935fa2eed184bd5cb5896833cad0b0429121fe578c8d8cbc2414f5"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.543989 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7sv4q" event={"ID":"7f6ab118-4fb2-4b08-a015-08d76f3fcb38","Type":"ContainerStarted","Data":"e23bfd42af579c1d45817a614581f3e96036be46df77c3eedc855c2a0de3c7f6"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.548841 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerStarted","Data":"911ecba1433c3e37b298295a2d1ac434b8b740f9eed0a85bd8f6cf1ab1b965bc"} Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.620954 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/ceilometer-0" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="proxy-httpd" probeResult="failure" output="HTTP probe failed with statuscode: 503" Jan 23 07:17:22 crc kubenswrapper[5102]: I0123 07:17:22.792446 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vcbp6"] Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.565042 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7sv4q" event={"ID":"7f6ab118-4fb2-4b08-a015-08d76f3fcb38","Type":"ContainerStarted","Data":"174c02431f1c59ad7f9abe23084157209612d312995dcf7b1c90091bc0f8b4d9"} Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.575812 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" event={"ID":"073f9584-597d-4618-9e0d-4ca37ae233cf","Type":"ContainerStarted","Data":"83f6688858929172569adc29be5050e472a2b130da335ee56b80e52deff26332"} Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.575858 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" event={"ID":"073f9584-597d-4618-9e0d-4ca37ae233cf","Type":"ContainerStarted","Data":"e2cc1fc7881e7044f7089571b27c4f04ac2e3d8a653f379631722fdd898fa3db"} Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.580240 5102 generic.go:334] "Generic (PLEG): container finished" podID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" 
containerID="00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2" exitCode=0 Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.580296 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" event={"ID":"e298e269-6676-4a09-8bb3-7fb0ad38b62e","Type":"ContainerDied","Data":"00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2"} Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.595415 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell0-cell-mapping-7sv4q" podStartSLOduration=3.595391446 podStartE2EDuration="3.595391446s" podCreationTimestamp="2026-01-23 07:17:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:23.582010177 +0000 UTC m=+1394.402359152" watchObservedRunningTime="2026-01-23 07:17:23.595391446 +0000 UTC m=+1394.415740421" Jan 23 07:17:23 crc kubenswrapper[5102]: I0123 07:17:23.600511 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" podStartSLOduration=2.600497565 podStartE2EDuration="2.600497565s" podCreationTimestamp="2026-01-23 07:17:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:23.59997047 +0000 UTC m=+1394.420319445" watchObservedRunningTime="2026-01-23 07:17:23.600497565 +0000 UTC m=+1394.420846540" Jan 23 07:17:25 crc kubenswrapper[5102]: I0123 07:17:25.253303 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:25 crc kubenswrapper[5102]: I0123 07:17:25.278710 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.633084 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"18efcf1f-9c02-45d3-bf31-455615a550fc","Type":"ContainerStarted","Data":"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.633236 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="18efcf1f-9c02-45d3-bf31-455615a550fc" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef" gracePeriod=30 Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.641263 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerStarted","Data":"eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.641307 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerStarted","Data":"e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.645573 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" event={"ID":"e298e269-6676-4a09-8bb3-7fb0ad38b62e","Type":"ContainerStarted","Data":"e8f705bb6c2768891290fe7fca4e7d7c19573450ece317925fe843b3d7ba3b1f"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.646450 5102 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.648132 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17fc342d-0e6c-45f3-b623-4018faf20020","Type":"ContainerStarted","Data":"945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.654050 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerStarted","Data":"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.654103 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerStarted","Data":"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"} Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.654248 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-log" containerID="cri-o://c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5" gracePeriod=30 Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.654601 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-metadata" containerID="cri-o://f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e" gracePeriod=30 Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.655325 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.431718731 podStartE2EDuration="5.655315401s" podCreationTimestamp="2026-01-23 07:17:21 +0000 UTC" firstStartedPulling="2026-01-23 07:17:22.161761349 +0000 UTC m=+1392.982110324" lastFinishedPulling="2026-01-23 07:17:25.385358029 +0000 UTC m=+1396.205706994" observedRunningTime="2026-01-23 07:17:26.647254929 +0000 UTC m=+1397.467603894" watchObservedRunningTime="2026-01-23 07:17:26.655315401 +0000 UTC m=+1397.475664376" Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.678223 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" podStartSLOduration=5.678197927 podStartE2EDuration="5.678197927s" podCreationTimestamp="2026-01-23 07:17:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:26.669916438 +0000 UTC m=+1397.490265413" watchObservedRunningTime="2026-01-23 07:17:26.678197927 +0000 UTC m=+1397.498546902" Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.691854 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.182135737 podStartE2EDuration="6.691835024s" podCreationTimestamp="2026-01-23 07:17:20 +0000 UTC" firstStartedPulling="2026-01-23 07:17:21.883038793 +0000 UTC m=+1392.703387768" lastFinishedPulling="2026-01-23 07:17:25.39273809 +0000 UTC m=+1396.213087055" observedRunningTime="2026-01-23 07:17:26.686199518 +0000 UTC m=+1397.506548493" watchObservedRunningTime="2026-01-23 07:17:26.691835024 +0000 UTC m=+1397.512183999" Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 
07:17:26.715707 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=3.386882829 podStartE2EDuration="6.715686202s" podCreationTimestamp="2026-01-23 07:17:20 +0000 UTC" firstStartedPulling="2026-01-23 07:17:22.068368016 +0000 UTC m=+1392.888716991" lastFinishedPulling="2026-01-23 07:17:25.397171399 +0000 UTC m=+1396.217520364" observedRunningTime="2026-01-23 07:17:26.706283807 +0000 UTC m=+1397.526632782" watchObservedRunningTime="2026-01-23 07:17:26.715686202 +0000 UTC m=+1397.536035177" Jan 23 07:17:26 crc kubenswrapper[5102]: I0123 07:17:26.734181 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.226619969 podStartE2EDuration="6.73415983s" podCreationTimestamp="2026-01-23 07:17:20 +0000 UTC" firstStartedPulling="2026-01-23 07:17:21.884304782 +0000 UTC m=+1392.704653757" lastFinishedPulling="2026-01-23 07:17:25.391844643 +0000 UTC m=+1396.212193618" observedRunningTime="2026-01-23 07:17:26.726842521 +0000 UTC m=+1397.547191516" watchObservedRunningTime="2026-01-23 07:17:26.73415983 +0000 UTC m=+1397.554508805" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.255947 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.371372 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs\") pod \"159508e3-098e-4a49-b216-12b5496ce36e\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.371524 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle\") pod \"159508e3-098e-4a49-b216-12b5496ce36e\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.371701 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs" (OuterVolumeSpecName: "logs") pod "159508e3-098e-4a49-b216-12b5496ce36e" (UID: "159508e3-098e-4a49-b216-12b5496ce36e"). InnerVolumeSpecName "logs". 
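Where images actually had to be pulled, the tracker entries above carry real firstStartedPulling/lastFinishedPulling stamps, and the numbers are consistent with podStartSLOduration being the E2E duration minus image-pull time, computed on the process-monotonic clock (the m=+ offsets, seconds since kubelet start). Checking that against the nova-cell1-novncproxy-0 entry above:

```python
# Sketch: verify podStartSLOduration = podStartE2EDuration - image-pull time,
# using the m=+ monotonic offsets copied from the nova-cell1-novncproxy-0 entry.
first_pull = 1392.982110324   # firstStartedPulling  m=+...
last_pull  = 1396.205706994   # lastFinishedPulling  m=+...
e2e        = 5.655315401      # podStartE2EDuration (watchObservedRunningTime - creation)

print(round(e2e - (last_pull - first_pull), 9))  # 2.431718731 == podStartSLOduration
```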
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.371732 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data\") pod \"159508e3-098e-4a49-b216-12b5496ce36e\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.371859 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9bzvk\" (UniqueName: \"kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk\") pod \"159508e3-098e-4a49-b216-12b5496ce36e\" (UID: \"159508e3-098e-4a49-b216-12b5496ce36e\") " Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.372692 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/159508e3-098e-4a49-b216-12b5496ce36e-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.377556 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk" (OuterVolumeSpecName: "kube-api-access-9bzvk") pod "159508e3-098e-4a49-b216-12b5496ce36e" (UID: "159508e3-098e-4a49-b216-12b5496ce36e"). InnerVolumeSpecName "kube-api-access-9bzvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.404221 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data" (OuterVolumeSpecName: "config-data") pod "159508e3-098e-4a49-b216-12b5496ce36e" (UID: "159508e3-098e-4a49-b216-12b5496ce36e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.410697 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "159508e3-098e-4a49-b216-12b5496ce36e" (UID: "159508e3-098e-4a49-b216-12b5496ce36e"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.474439 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.474478 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9bzvk\" (UniqueName: \"kubernetes.io/projected/159508e3-098e-4a49-b216-12b5496ce36e-kube-api-access-9bzvk\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.474490 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/159508e3-098e-4a49-b216-12b5496ce36e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665658 5102 generic.go:334] "Generic (PLEG): container finished" podID="159508e3-098e-4a49-b216-12b5496ce36e" containerID="f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e" exitCode=0 Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665694 5102 generic.go:334] "Generic (PLEG): container finished" podID="159508e3-098e-4a49-b216-12b5496ce36e" containerID="c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5" exitCode=143 Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665710 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665777 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerDied","Data":"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"} Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665832 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerDied","Data":"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"} Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665849 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"159508e3-098e-4a49-b216-12b5496ce36e","Type":"ContainerDied","Data":"1247d7c40fe068741371b4af2fa3b4cfd6807cbb39ff5d8f2a27ab4cbbf702fc"} Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.665873 5102 scope.go:117] "RemoveContainer" containerID="f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.698914 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.699627 5102 scope.go:117] "RemoveContainer" containerID="c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.714908 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.731970 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:27 crc kubenswrapper[5102]: E0123 07:17:27.732381 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-metadata" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.732399 5102 
Jan 23 07:17:27 crc kubenswrapper[5102]: E0123 07:17:27.732440 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-log"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.732447 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-log"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.732683 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-metadata"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.732705 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="159508e3-098e-4a49-b216-12b5496ce36e" containerName="nova-metadata-log"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.732842 5102 scope.go:117] "RemoveContainer" containerID="f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.733810 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 23 07:17:27 crc kubenswrapper[5102]: E0123 07:17:27.734197 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e\": container with ID starting with f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e not found: ID does not exist" containerID="f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734233 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"} err="failed to get container status \"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e\": rpc error: code = NotFound desc = could not find container \"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e\": container with ID starting with f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e not found: ID does not exist"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734275 5102 scope.go:117] "RemoveContainer" containerID="c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"
Jan 23 07:17:27 crc kubenswrapper[5102]: E0123 07:17:27.734708 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5\": container with ID starting with c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5 not found: ID does not exist" containerID="c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734730 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"} err="failed to get container status \"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5\": rpc error: code = NotFound desc = could not find container \"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5\": container with ID starting with c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5 not found: ID does not exist"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734746 5102 scope.go:117] "RemoveContainer" containerID="f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734920 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e"} err="failed to get container status \"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e\": rpc error: code = NotFound desc = could not find container \"f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e\": container with ID starting with f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e not found: ID does not exist"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.734949 5102 scope.go:117] "RemoveContainer" containerID="c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.735119 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5"} err="failed to get container status \"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5\": rpc error: code = NotFound desc = could not find container \"c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5\": container with ID starting with c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5 not found: ID does not exist"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.735553 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.736442 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.756039 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"]
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.883521 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.883990 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.884027 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n6lcj\" (UniqueName: \"kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0"
Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.884134 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0"
\"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.884159 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.985609 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.985689 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.985732 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n6lcj\" (UniqueName: \"kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.985773 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.985802 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.986991 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.989850 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:27 crc kubenswrapper[5102]: I0123 07:17:27.990949 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.005763 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.011118 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n6lcj\" (UniqueName: \"kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj\") pod \"nova-metadata-0\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") " pod="openstack/nova-metadata-0" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.058287 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.189295 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode298e269_6676_4a09_8bb3_7fb0ad38b62e.slice/crio-conmon-00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode298e269_6676_4a09_8bb3_7fb0ad38b62e.slice/crio-conmon-00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.189631 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode298e269_6676_4a09_8bb3_7fb0ad38b62e.slice/crio-00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode298e269_6676_4a09_8bb3_7fb0ad38b62e.slice/crio-00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.211395 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-conmon-c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-conmon-c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.215725 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-c74f419e2930a69c36d2e9f44eb62f858838008a31682c9a5a331e52af43b4d5.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.223866 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-conmon-f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch 
/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-conmon-f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: W0123 07:17:28.225184 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice/crio-f05d5ce5fc9d962892e2b0863fcf45ba889862f523eec53ddc3c0462632e781e.scope: no such file or directory Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.362101 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:28 crc kubenswrapper[5102]: E0123 07:17:28.561328 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod159508e3_098e_4a49_b216_12b5496ce36e.slice\": RecentStats: unable to find data in memory cache]" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.679435 5102 generic.go:334] "Generic (PLEG): container finished" podID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerID="edb6be1ebb76f4f9ff7af6fb07ea4a47e81a16d609e59271fddd77fb1ec66f5f" exitCode=137 Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.679511 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerDied","Data":"edb6be1ebb76f4f9ff7af6fb07ea4a47e81a16d609e59271fddd77fb1ec66f5f"} Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.680828 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"13aa228b-3510-4424-b9dd-e9745a2420e6","Type":"ContainerDied","Data":"18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66"} Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.680896 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="18c4584e0f9e0acdf2b6515614c618510847889ab5fc379a726527c92d4c7a66" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.693558 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerStarted","Data":"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"} Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.693594 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerStarted","Data":"e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1"} Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.732884 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.909894 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.909956 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.910009 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.910071 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.910113 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.910168 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.910238 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l5dwh\" (UniqueName: \"kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh\") pod \"13aa228b-3510-4424-b9dd-e9745a2420e6\" (UID: \"13aa228b-3510-4424-b9dd-e9745a2420e6\") " Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.911728 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.911859 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "log-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.918795 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh" (OuterVolumeSpecName: "kube-api-access-l5dwh") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "kube-api-access-l5dwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.918827 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts" (OuterVolumeSpecName: "scripts") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.946127 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:28 crc kubenswrapper[5102]: I0123 07:17:28.987910 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013243 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013282 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013297 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013311 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l5dwh\" (UniqueName: \"kubernetes.io/projected/13aa228b-3510-4424-b9dd-e9745a2420e6-kube-api-access-l5dwh\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013324 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.013335 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/13aa228b-3510-4424-b9dd-e9745a2420e6-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.027881 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data" (OuterVolumeSpecName: "config-data") pod "13aa228b-3510-4424-b9dd-e9745a2420e6" (UID: "13aa228b-3510-4424-b9dd-e9745a2420e6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.114647 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/13aa228b-3510-4424-b9dd-e9745a2420e6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.629952 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="159508e3-098e-4a49-b216-12b5496ce36e" path="/var/lib/kubelet/pods/159508e3-098e-4a49-b216-12b5496ce36e/volumes" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.706763 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.711920 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerStarted","Data":"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"} Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.771864 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.771843278 podStartE2EDuration="2.771843278s" podCreationTimestamp="2026-01-23 07:17:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:29.739222007 +0000 UTC m=+1400.559571012" watchObservedRunningTime="2026-01-23 07:17:29.771843278 +0000 UTC m=+1400.592192253" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.773780 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.784378 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.794746 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:17:29 crc kubenswrapper[5102]: E0123 07:17:29.795401 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-central-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.795469 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-central-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: E0123 07:17:29.795545 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="sg-core" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.795599 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="sg-core" Jan 23 07:17:29 crc kubenswrapper[5102]: E0123 07:17:29.795663 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="proxy-httpd" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.795716 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="proxy-httpd" Jan 23 07:17:29 crc kubenswrapper[5102]: E0123 07:17:29.795797 5102 cpu_manager.go:410] 
"RemoveStaleState: removing container" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-notification-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.795858 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-notification-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.796077 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-notification-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.796153 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="sg-core" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.796216 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="proxy-httpd" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.796278 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" containerName="ceilometer-central-agent" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.797878 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.803282 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.803756 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.817267 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934550 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gt6jz\" (UniqueName: \"kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934633 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934686 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934787 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934830 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934852 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:29 crc kubenswrapper[5102]: I0123 07:17:29.934963 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037170 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037235 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037258 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037274 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037318 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037392 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gt6jz\" (UniqueName: \"kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.037428 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.038102 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" 
(UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.038233 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.043367 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.044007 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.044480 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.060155 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.069143 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gt6jz\" (UniqueName: \"kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz\") pod \"ceilometer-0\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.121159 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:17:30 crc kubenswrapper[5102]: I0123 07:17:30.762340 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.216345 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.216891 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.466063 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.466520 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.499960 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.527186 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.616666 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13aa228b-3510-4424-b9dd-e9745a2420e6" path="/var/lib/kubelet/pods/13aa228b-3510-4424-b9dd-e9745a2420e6/volumes" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.618057 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.693442 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.693859 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="dnsmasq-dns" containerID="cri-o://1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b" gracePeriod=10 Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.743392 5102 generic.go:334] "Generic (PLEG): container finished" podID="7f6ab118-4fb2-4b08-a015-08d76f3fcb38" containerID="174c02431f1c59ad7f9abe23084157209612d312995dcf7b1c90091bc0f8b4d9" exitCode=0 Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.743456 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7sv4q" event={"ID":"7f6ab118-4fb2-4b08-a015-08d76f3fcb38","Type":"ContainerDied","Data":"174c02431f1c59ad7f9abe23084157209612d312995dcf7b1c90091bc0f8b4d9"} Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.750416 5102 generic.go:334] "Generic (PLEG): container finished" podID="073f9584-597d-4618-9e0d-4ca37ae233cf" containerID="83f6688858929172569adc29be5050e472a2b130da335ee56b80e52deff26332" exitCode=0 Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.750488 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" event={"ID":"073f9584-597d-4618-9e0d-4ca37ae233cf","Type":"ContainerDied","Data":"83f6688858929172569adc29be5050e472a2b130da335ee56b80e52deff26332"} Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.752593 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerStarted","Data":"b59fb2f36892c8b6fab1579f8ab5cd8c2a9f8af79555a1e6562cbe776526632e"} Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.752620 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerStarted","Data":"4a4eb35f51f45ca4111cdd5ab63c59541e27f8708aa2a06ee04a6c6fe60a7f6d"} Jan 23 07:17:31 crc kubenswrapper[5102]: I0123 07:17:31.805797 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.272825 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.299896 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.299935 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.187:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390445 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390532 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390615 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390705 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390838 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.390986 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jm68f\" (UniqueName: 
\"kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f\") pod \"00cc675d-d659-4c3a-ad06-589435804d40\" (UID: \"00cc675d-d659-4c3a-ad06-589435804d40\") " Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.398704 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f" (OuterVolumeSpecName: "kube-api-access-jm68f") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "kube-api-access-jm68f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.462337 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config" (OuterVolumeSpecName: "config") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.467023 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.475153 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.476062 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.490486 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "00cc675d-d659-4c3a-ad06-589435804d40" (UID: "00cc675d-d659-4c3a-ad06-589435804d40"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492891 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492917 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jm68f\" (UniqueName: \"kubernetes.io/projected/00cc675d-d659-4c3a-ad06-589435804d40-kube-api-access-jm68f\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492932 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492942 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492953 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.492962 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/00cc675d-d659-4c3a-ad06-589435804d40-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.762497 5102 generic.go:334] "Generic (PLEG): container finished" podID="00cc675d-d659-4c3a-ad06-589435804d40" containerID="1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b" exitCode=0 Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.762617 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.762606 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" event={"ID":"00cc675d-d659-4c3a-ad06-589435804d40","Type":"ContainerDied","Data":"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b"} Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.763261 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-75dbb546bf-qshcb" event={"ID":"00cc675d-d659-4c3a-ad06-589435804d40","Type":"ContainerDied","Data":"ee9411c3dcea3f6143759a7127717dac321f9d0fb5c9c81c65647225381c2df9"} Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.763322 5102 scope.go:117] "RemoveContainer" containerID="1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.765321 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerStarted","Data":"5e53a68a7de19de993fc494afb2a5deb3586933db407c709b7e878f663584cdd"} Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.837964 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.847754 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-75dbb546bf-qshcb"] Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.862907 5102 scope.go:117] "RemoveContainer" containerID="42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.915480 5102 scope.go:117] "RemoveContainer" containerID="1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b" Jan 23 07:17:32 crc kubenswrapper[5102]: E0123 07:17:32.916280 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b\": container with ID starting with 1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b not found: ID does not exist" containerID="1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.916316 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b"} err="failed to get container status \"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b\": rpc error: code = NotFound desc = could not find container \"1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b\": container with ID starting with 1d87b6e774db3f7a8bc1cf1120f7b92a5cdb64c262579b94024b96ac4e11a58b not found: ID does not exist" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.916354 5102 scope.go:117] "RemoveContainer" containerID="42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5" Jan 23 07:17:32 crc kubenswrapper[5102]: E0123 07:17:32.920583 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5\": container with ID starting with 42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5 not found: ID does not exist" 
containerID="42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5" Jan 23 07:17:32 crc kubenswrapper[5102]: I0123 07:17:32.920796 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5"} err="failed to get container status \"42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5\": rpc error: code = NotFound desc = could not find container \"42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5\": container with ID starting with 42c18819e4e64bd359c4d652898ae34df67c3a97fd792d4416225b6983c942e5 not found: ID does not exist" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.059409 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.060596 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.278513 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7sv4q" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.297386 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416038 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mk4t8\" (UniqueName: \"kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8\") pod \"073f9584-597d-4618-9e0d-4ca37ae233cf\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416098 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-combined-ca-bundle\") pod \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416176 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts\") pod \"073f9584-597d-4618-9e0d-4ca37ae233cf\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416226 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle\") pod \"073f9584-597d-4618-9e0d-4ca37ae233cf\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416252 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bc6xd\" (UniqueName: \"kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd\") pod \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416282 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data\") pod \"073f9584-597d-4618-9e0d-4ca37ae233cf\" (UID: \"073f9584-597d-4618-9e0d-4ca37ae233cf\") " Jan 23 07:17:33 crc 
kubenswrapper[5102]: I0123 07:17:33.416305 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts\") pod \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.416398 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data\") pod \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\" (UID: \"7f6ab118-4fb2-4b08-a015-08d76f3fcb38\") " Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.422323 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8" (OuterVolumeSpecName: "kube-api-access-mk4t8") pod "073f9584-597d-4618-9e0d-4ca37ae233cf" (UID: "073f9584-597d-4618-9e0d-4ca37ae233cf"). InnerVolumeSpecName "kube-api-access-mk4t8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.425755 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd" (OuterVolumeSpecName: "kube-api-access-bc6xd") pod "7f6ab118-4fb2-4b08-a015-08d76f3fcb38" (UID: "7f6ab118-4fb2-4b08-a015-08d76f3fcb38"). InnerVolumeSpecName "kube-api-access-bc6xd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.428862 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts" (OuterVolumeSpecName: "scripts") pod "073f9584-597d-4618-9e0d-4ca37ae233cf" (UID: "073f9584-597d-4618-9e0d-4ca37ae233cf"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.431759 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts" (OuterVolumeSpecName: "scripts") pod "7f6ab118-4fb2-4b08-a015-08d76f3fcb38" (UID: "7f6ab118-4fb2-4b08-a015-08d76f3fcb38"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.451781 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "073f9584-597d-4618-9e0d-4ca37ae233cf" (UID: "073f9584-597d-4618-9e0d-4ca37ae233cf"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.457968 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data" (OuterVolumeSpecName: "config-data") pod "7f6ab118-4fb2-4b08-a015-08d76f3fcb38" (UID: "7f6ab118-4fb2-4b08-a015-08d76f3fcb38"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.466609 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data" (OuterVolumeSpecName: "config-data") pod "073f9584-597d-4618-9e0d-4ca37ae233cf" (UID: "073f9584-597d-4618-9e0d-4ca37ae233cf"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.487118 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "7f6ab118-4fb2-4b08-a015-08d76f3fcb38" (UID: "7f6ab118-4fb2-4b08-a015-08d76f3fcb38"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519023 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519069 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bc6xd\" (UniqueName: \"kubernetes.io/projected/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-kube-api-access-bc6xd\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519080 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519090 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519098 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519106 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mk4t8\" (UniqueName: \"kubernetes.io/projected/073f9584-597d-4618-9e0d-4ca37ae233cf-kube-api-access-mk4t8\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519114 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/7f6ab118-4fb2-4b08-a015-08d76f3fcb38-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.519123 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/073f9584-597d-4618-9e0d-4ca37ae233cf-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.608203 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="00cc675d-d659-4c3a-ad06-589435804d40" path="/var/lib/kubelet/pods/00cc675d-d659-4c3a-ad06-589435804d40/volumes" Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.778389 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-db-sync-vcbp6" 
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.778435 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e2cc1fc7881e7044f7089571b27c4f04ac2e3d8a653f379631722fdd898fa3db"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.778495 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-db-sync-vcbp6"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.782372 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerStarted","Data":"011f058fbe518801efdd080d127e821f519b48af0ea6779adcfe136e1589979c"}
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.795100 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-cell-mapping-7sv4q" event={"ID":"7f6ab118-4fb2-4b08-a015-08d76f3fcb38","Type":"ContainerDied","Data":"e23bfd42af579c1d45817a614581f3e96036be46df77c3eedc855c2a0de3c7f6"}
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.795147 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="e23bfd42af579c1d45817a614581f3e96036be46df77c3eedc855c2a0de3c7f6"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.795181 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-cell-mapping-7sv4q"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.976531 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 23 07:17:33 crc kubenswrapper[5102]: E0123 07:17:33.978020 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="init"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978040 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="init"
Jan 23 07:17:33 crc kubenswrapper[5102]: E0123 07:17:33.978056 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="dnsmasq-dns"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978063 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="dnsmasq-dns"
Jan 23 07:17:33 crc kubenswrapper[5102]: E0123 07:17:33.978090 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="073f9584-597d-4618-9e0d-4ca37ae233cf" containerName="nova-cell1-conductor-db-sync"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978097 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="073f9584-597d-4618-9e0d-4ca37ae233cf" containerName="nova-cell1-conductor-db-sync"
Jan 23 07:17:33 crc kubenswrapper[5102]: E0123 07:17:33.978138 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7f6ab118-4fb2-4b08-a015-08d76f3fcb38" containerName="nova-manage"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978144 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7f6ab118-4fb2-4b08-a015-08d76f3fcb38" containerName="nova-manage"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978432 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="073f9584-597d-4618-9e0d-4ca37ae233cf" containerName="nova-cell1-conductor-db-sync"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978456 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="00cc675d-d659-4c3a-ad06-589435804d40" containerName="dnsmasq-dns"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.978478 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="7f6ab118-4fb2-4b08-a015-08d76f3fcb38" containerName="nova-manage"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.979232 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:33 crc kubenswrapper[5102]: I0123 07:17:33.990076 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-conductor-config-data"
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.027150 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"]
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.031982 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.032349 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.032468 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p75nc\" (UniqueName: \"kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.108111 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.108347 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-log" containerID="cri-o://e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7" gracePeriod=30
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.108494 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-api" containerID="cri-o://eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c" gracePeriod=30
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.138642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.138734 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0"
\"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.138776 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p75nc\" (UniqueName: \"kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.147224 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.148193 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.169052 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.169255 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p75nc\" (UniqueName: \"kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc\") pod \"nova-cell1-conductor-0\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.206773 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.320068 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.783076 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.805982 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6c65ea3f-14be-4130-b116-2291c114323e","Type":"ContainerStarted","Data":"59b32d5bb78bc24a7466a3a43cd2d5e32a35e2aa5e1fd3029a30083169caef7a"} Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.825815 5102 generic.go:334] "Generic (PLEG): container finished" podID="d16577a7-7253-4f86-917a-f398d30ef959" containerID="e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7" exitCode=143 Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.826223 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerDied","Data":"e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7"} Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.826530 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" containerName="nova-scheduler-scheduler" containerID="cri-o://945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" gracePeriod=30 Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.826826 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-log" containerID="cri-o://9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97" gracePeriod=30 Jan 23 07:17:34 crc kubenswrapper[5102]: I0123 07:17:34.827190 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-metadata" containerID="cri-o://a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c" gracePeriod=30 Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.359782 5102 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.464774 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n6lcj\" (UniqueName: \"kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj\") pod \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") "
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.465122 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs\") pod \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") "
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.465151 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") pod \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") "
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.465183 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data\") pod \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") "
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.465261 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs\") pod \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\" (UID: \"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247\") "
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.466119 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs" (OuterVolumeSpecName: "logs") pod "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" (UID: "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.470026 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj" (OuterVolumeSpecName: "kube-api-access-n6lcj") pod "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" (UID: "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247"). InnerVolumeSpecName "kube-api-access-n6lcj". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.493092 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" (UID: "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.502970 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data" (OuterVolumeSpecName: "config-data") pod "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" (UID: "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.522815 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" (UID: "a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.573994 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n6lcj\" (UniqueName: \"kubernetes.io/projected/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-kube-api-access-n6lcj\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.574035 5102 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.574046 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.574055 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.574064 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.851462 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6c65ea3f-14be-4130-b116-2291c114323e","Type":"ContainerStarted","Data":"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7"}
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.854485 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-conductor-0"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.861122 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerStarted","Data":"ea2e31c884349bfdd80de491510d5a2078e9cdcd5e7a28ecb3e0083a67bc37ca"}
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.862319 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864116 5102 generic.go:334] "Generic (PLEG): container finished" podID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerID="a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c" exitCode=0
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864144 5102 generic.go:334] "Generic (PLEG): container finished" podID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerID="9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97" exitCode=143
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864160 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerDied","Data":"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"}
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864179 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerDied","Data":"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"}
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864190 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247","Type":"ContainerDied","Data":"e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1"}
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864207 5102 scope.go:117] "RemoveContainer" containerID="a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.864247 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.878869 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-conductor-0" podStartSLOduration=2.878843587 podStartE2EDuration="2.878843587s" podCreationTimestamp="2026-01-23 07:17:33 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:35.870179445 +0000 UTC m=+1406.690528440" watchObservedRunningTime="2026-01-23 07:17:35.878843587 +0000 UTC m=+1406.699192562"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.892273 5102 scope.go:117] "RemoveContainer" containerID="9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.912445 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=3.091502636 podStartE2EDuration="6.912425958s" podCreationTimestamp="2026-01-23 07:17:29 +0000 UTC" firstStartedPulling="2026-01-23 07:17:30.77989059 +0000 UTC m=+1401.600239605" lastFinishedPulling="2026-01-23 07:17:34.600813952 +0000 UTC m=+1405.421162927" observedRunningTime="2026-01-23 07:17:35.905307895 +0000 UTC m=+1406.725656880" watchObservedRunningTime="2026-01-23 07:17:35.912425958 +0000 UTC m=+1406.732774933"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.927685 5102 scope.go:117] "RemoveContainer" containerID="a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"
Jan 23 07:17:35 crc kubenswrapper[5102]: E0123 07:17:35.928223 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c\": container with ID starting with a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c not found: ID does not exist" containerID="a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.928258 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"} err="failed to get container status \"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c\": rpc error: code = NotFound desc = could not find container \"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c\": container with ID starting with a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c not found: ID does not exist"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.928280 5102 scope.go:117] "RemoveContainer" containerID="9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.928329 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"]
Jan 23 07:17:35 crc kubenswrapper[5102]: E0123 07:17:35.928686 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97\": container with ID starting with 9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97 not found: ID does not exist" containerID="9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.928733 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"} err="failed to get container status \"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97\": rpc error: code = NotFound desc = could not find container \"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97\": container with ID starting with 9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97 not found: ID does not exist"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.928763 5102 scope.go:117] "RemoveContainer" containerID="a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.929183 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c"} err="failed to get container status \"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c\": rpc error: code = NotFound desc = could not find container \"a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c\": container with ID starting with a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c not found: ID does not exist"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.929221 5102 scope.go:117] "RemoveContainer" containerID="9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.929450 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97"} err="failed to get container status \"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97\": rpc error: code = NotFound desc = could not find container \"9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97\": container with ID starting with 9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97 not found: ID does not exist"
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.938037 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"]
Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.957089 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-metadata-0"]
Jan 23 07:17:35 crc kubenswrapper[5102]: E0123 07:17:35.957531 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-metadata"
podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-metadata" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.957572 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-metadata" Jan 23 07:17:35 crc kubenswrapper[5102]: E0123 07:17:35.957594 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-log" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.957600 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-log" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.957763 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-metadata" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.957789 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" containerName="nova-metadata-log" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.958694 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.960712 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.960918 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 23 07:17:35 crc kubenswrapper[5102]: I0123 07:17:35.977560 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.084565 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.084890 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.084956 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.084992 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fj6mj\" (UniqueName: \"kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.085117 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.187011 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.187088 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.187134 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.187177 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fj6mj\" (UniqueName: \"kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.187196 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.188315 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.193248 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.193607 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.196963 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.203819 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-fj6mj\" (UniqueName: \"kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj\") pod \"nova-metadata-0\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.274160 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:17:36 crc kubenswrapper[5102]: E0123 07:17:36.475555 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:17:36 crc kubenswrapper[5102]: E0123 07:17:36.495158 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:17:36 crc kubenswrapper[5102]: E0123 07:17:36.507244 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:17:36 crc kubenswrapper[5102]: E0123 07:17:36.507317 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" containerName="nova-scheduler-scheduler" Jan 23 07:17:36 crc kubenswrapper[5102]: I0123 07:17:36.917411 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.610258 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247" path="/var/lib/kubelet/pods/a65a2ea5-cf9e-49d8-b8b8-f7a27fbd5247/volumes" Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.891787 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerStarted","Data":"95fec98cbec410ddb48024a5e38a2637ac677c9305b0383d43b141ef308abb41"} Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.892126 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerStarted","Data":"e3164f5181fac42e2f9df3dc2c85e26eb9932232e7c524bab82d3e9336cbeec0"} Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.892139 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerStarted","Data":"481aba155621706cfcb1050e2128b2adea8c884a348dcbd70b3e71a5101e038f"} Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.896017 5102 generic.go:334] "Generic (PLEG): container finished" podID="17fc342d-0e6c-45f3-b623-4018faf20020" containerID="945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" exitCode=0 Jan 23 07:17:37 crc 
kubenswrapper[5102]: I0123 07:17:37.896942 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17fc342d-0e6c-45f3-b623-4018faf20020","Type":"ContainerDied","Data":"945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3"} Jan 23 07:17:37 crc kubenswrapper[5102]: I0123 07:17:37.922184 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=2.9221599620000003 podStartE2EDuration="2.922159962s" podCreationTimestamp="2026-01-23 07:17:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:37.911349924 +0000 UTC m=+1408.731698899" watchObservedRunningTime="2026-01-23 07:17:37.922159962 +0000 UTC m=+1408.742508947" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.153345 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.228118 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data\") pod \"17fc342d-0e6c-45f3-b623-4018faf20020\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.228218 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p2922\" (UniqueName: \"kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922\") pod \"17fc342d-0e6c-45f3-b623-4018faf20020\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.228275 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle\") pod \"17fc342d-0e6c-45f3-b623-4018faf20020\" (UID: \"17fc342d-0e6c-45f3-b623-4018faf20020\") " Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.251730 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922" (OuterVolumeSpecName: "kube-api-access-p2922") pod "17fc342d-0e6c-45f3-b623-4018faf20020" (UID: "17fc342d-0e6c-45f3-b623-4018faf20020"). InnerVolumeSpecName "kube-api-access-p2922". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.260721 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "17fc342d-0e6c-45f3-b623-4018faf20020" (UID: "17fc342d-0e6c-45f3-b623-4018faf20020"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.264747 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data" (OuterVolumeSpecName: "config-data") pod "17fc342d-0e6c-45f3-b623-4018faf20020" (UID: "17fc342d-0e6c-45f3-b623-4018faf20020"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.330202 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.330243 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p2922\" (UniqueName: \"kubernetes.io/projected/17fc342d-0e6c-45f3-b623-4018faf20020-kube-api-access-p2922\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.330253 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/17fc342d-0e6c-45f3-b623-4018faf20020-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.912568 5102 generic.go:334] "Generic (PLEG): container finished" podID="d16577a7-7253-4f86-917a-f398d30ef959" containerID="eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c" exitCode=0 Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.912595 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerDied","Data":"eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c"} Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.926857 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"17fc342d-0e6c-45f3-b623-4018faf20020","Type":"ContainerDied","Data":"f99b46d31f0aa251d2f4d1e3d8f697c0b9cd665573a11e14bbf34f3549d7eea9"} Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.926907 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:17:38 crc kubenswrapper[5102]: I0123 07:17:38.926952 5102 scope.go:117] "RemoveContainer" containerID="945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.021016 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.051584 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.076505 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:39 crc kubenswrapper[5102]: E0123 07:17:39.077642 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" containerName="nova-scheduler-scheduler" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.077669 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" containerName="nova-scheduler-scheduler" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.077952 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" containerName="nova-scheduler-scheduler" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.079006 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.082626 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.089573 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.143150 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4kh44\" (UniqueName: \"kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.143621 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.143837 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.190788 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.245666 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle\") pod \"d16577a7-7253-4f86-917a-f398d30ef959\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.245751 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l9tsq\" (UniqueName: \"kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq\") pod \"d16577a7-7253-4f86-917a-f398d30ef959\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.245784 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs\") pod \"d16577a7-7253-4f86-917a-f398d30ef959\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.245868 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data\") pod \"d16577a7-7253-4f86-917a-f398d30ef959\" (UID: \"d16577a7-7253-4f86-917a-f398d30ef959\") " Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.246135 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4kh44\" (UniqueName: \"kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0" Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.246227 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.246260 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.246565 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs" (OuterVolumeSpecName: "logs") pod "d16577a7-7253-4f86-917a-f398d30ef959" (UID: "d16577a7-7253-4f86-917a-f398d30ef959"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.250777 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.265347 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.266653 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq" (OuterVolumeSpecName: "kube-api-access-l9tsq") pod "d16577a7-7253-4f86-917a-f398d30ef959" (UID: "d16577a7-7253-4f86-917a-f398d30ef959"). InnerVolumeSpecName "kube-api-access-l9tsq". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.276257 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data" (OuterVolumeSpecName: "config-data") pod "d16577a7-7253-4f86-917a-f398d30ef959" (UID: "d16577a7-7253-4f86-917a-f398d30ef959"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.280709 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d16577a7-7253-4f86-917a-f398d30ef959" (UID: "d16577a7-7253-4f86-917a-f398d30ef959"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.285059 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4kh44\" (UniqueName: \"kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44\") pod \"nova-scheduler-0\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.347640 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l9tsq\" (UniqueName: \"kubernetes.io/projected/d16577a7-7253-4f86-917a-f398d30ef959-kube-api-access-l9tsq\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.347682 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/d16577a7-7253-4f86-917a-f398d30ef959-logs\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.347695 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-config-data\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.347708 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d16577a7-7253-4f86-917a-f398d30ef959-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.491001 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.631281 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="17fc342d-0e6c-45f3-b623-4018faf20020" path="/var/lib/kubelet/pods/17fc342d-0e6c-45f3-b623-4018faf20020/volumes"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.898981 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.944361 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f808f17-0e86-44de-a0d7-c326fe363e26","Type":"ContainerStarted","Data":"37526bcb1643f38bf8d24650f2ab0561a91dc640200e0c40ed66ddef910f3daa"}
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.949582 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.949630 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"d16577a7-7253-4f86-917a-f398d30ef959","Type":"ContainerDied","Data":"911ecba1433c3e37b298295a2d1ac434b8b740f9eed0a85bd8f6cf1ab1b965bc"}
Jan 23 07:17:39 crc kubenswrapper[5102]: I0123 07:17:39.949714 5102 scope.go:117] "RemoveContainer" containerID="eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.056768 5102 scope.go:117] "RemoveContainer" containerID="e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.134320 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.145925 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.158008 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:17:40 crc kubenswrapper[5102]: E0123 07:17:40.158459 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-log"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.158478 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-log"
Jan 23 07:17:40 crc kubenswrapper[5102]: E0123 07:17:40.158511 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-api"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.158517 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-api"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.158726 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-api"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.158749 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d16577a7-7253-4f86-917a-f398d30ef959" containerName="nova-api-log"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.159852 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.165071 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.168086 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.265599 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.265694 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhg24\" (UniqueName: \"kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.265726 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.265753 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.367488 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.367598 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhg24\" (UniqueName: \"kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.367629 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.367657 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.368126 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0"
pod="openstack/nova-api-0" Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.378116 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0" Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.388845 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0" Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.395156 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhg24\" (UniqueName: \"kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24\") pod \"nova-api-0\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " pod="openstack/nova-api-0" Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.483384 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.986592 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:17:40 crc kubenswrapper[5102]: I0123 07:17:40.989934 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f808f17-0e86-44de-a0d7-c326fe363e26","Type":"ContainerStarted","Data":"1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30"} Jan 23 07:17:41 crc kubenswrapper[5102]: I0123 07:17:41.019300 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.019278081 podStartE2EDuration="2.019278081s" podCreationTimestamp="2026-01-23 07:17:39 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:41.016190935 +0000 UTC m=+1411.836539910" watchObservedRunningTime="2026-01-23 07:17:41.019278081 +0000 UTC m=+1411.839627056" Jan 23 07:17:41 crc kubenswrapper[5102]: I0123 07:17:41.275939 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:17:41 crc kubenswrapper[5102]: I0123 07:17:41.276895 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:17:41 crc kubenswrapper[5102]: I0123 07:17:41.619006 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d16577a7-7253-4f86-917a-f398d30ef959" path="/var/lib/kubelet/pods/d16577a7-7253-4f86-917a-f398d30ef959/volumes" Jan 23 07:17:42 crc kubenswrapper[5102]: I0123 07:17:42.005945 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerStarted","Data":"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94"} Jan 23 07:17:42 crc kubenswrapper[5102]: I0123 07:17:42.005991 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerStarted","Data":"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624"} Jan 23 07:17:42 crc kubenswrapper[5102]: I0123 07:17:42.006002 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerStarted","Data":"073393df643f6fb36776cea9427c6bbbda54e6fc7405b9eaaf6ec538ec3f32fa"} Jan 23 07:17:42 crc kubenswrapper[5102]: I0123 07:17:42.036563 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.036526781 podStartE2EDuration="2.036526781s" podCreationTimestamp="2026-01-23 07:17:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:42.028453148 +0000 UTC m=+1412.848802143" watchObservedRunningTime="2026-01-23 07:17:42.036526781 +0000 UTC m=+1412.856875756" Jan 23 07:17:44 crc kubenswrapper[5102]: I0123 07:17:44.352228 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-conductor-0" Jan 23 07:17:44 crc kubenswrapper[5102]: I0123 07:17:44.491296 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 07:17:46 crc kubenswrapper[5102]: I0123 07:17:46.274938 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 07:17:46 crc kubenswrapper[5102]: I0123 07:17:46.275007 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 07:17:46 crc kubenswrapper[5102]: I0123 07:17:46.768443 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:17:46 crc kubenswrapper[5102]: I0123 07:17:46.768829 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:17:47 crc kubenswrapper[5102]: I0123 07:17:47.293710 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:47 crc kubenswrapper[5102]: I0123 07:17:47.293781 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:49 crc kubenswrapper[5102]: I0123 07:17:49.492196 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 07:17:49 crc kubenswrapper[5102]: I0123 07:17:49.527065 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 07:17:50 crc kubenswrapper[5102]: I0123 07:17:50.157713 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 07:17:50 crc kubenswrapper[5102]: I0123 07:17:50.485802 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:17:50 crc kubenswrapper[5102]: I0123 07:17:50.485886 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:17:51 crc kubenswrapper[5102]: I0123 07:17:51.568773 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-log" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:51 crc kubenswrapper[5102]: I0123 07:17:51.570181 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-api" probeResult="failure" output="Get \"http://10.217.0.198:8774/\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.288018 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.290041 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.297122 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.724896 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1: no such file or directory Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725024 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope: no such file or directory Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725123 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope: no such file or directory Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725202 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c.scope": 0x40000100 == 
Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.288018 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.290041 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0"
Jan 23 07:17:56 crc kubenswrapper[5102]: I0123 07:17:56.297122 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.724896 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-e952502856bf72ca2b7fc84023aa49ba03633c6e79d79757d8a9b1ed769260d1: no such file or directory
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725024 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope: no such file or directory
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725123 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-9a90afd10c5d72ecfb55d730f484a7eab459a404415b8c5b64ed2ba94ab57f97.scope: no such file or directory
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725202 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-conmon-a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c.scope: no such file or directory
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.725283 5102 watcher.go:93] Error while processing event ("/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-poda65a2ea5_cf9e_49d8_b8b8_f7a27fbd5247.slice/crio-a54bbd6ec871d14bfb4dff31ccf111d11e8b6dc0657e08b5e86ce120df68a98c.scope: no such file or directory
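[editor's note] The watcher.go warnings above are a benign race: the cgroup watcher tries to add an inotify watch for IN_CREATE|IN_ISDIR (0x100|0x40000000 == 0x40000100, matching the mask in the log) on a container's cgroup directory, but the short-lived crio-*.scope directory has already been removed, so inotify_add_watch(2) returns ENOENT. A minimal Linux sketch of the same failure mode (assumes golang.org/x/sys/unix; the path is shaped after the log but hypothetical):

```go
package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.InotifyInit()
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// A cgroup directory that no longer exists by the time we watch it.
	path := "/sys/fs/cgroup/kubepods.slice/kubepods-besteffort.slice/crio-gone.scope"

	// Same mask as the log: watch for directory creation events.
	_, err = unix.InotifyAddWatch(fd, path, unix.IN_CREATE|unix.IN_ISDIR)
	fmt.Println(err) // no such file or directory
}
```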
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.729382 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod17fc342d_0e6c_45f3_b623_4018faf20020.slice/crio-945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3.scope WatchSource:0}: Error finding container 945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3: Status 404 returned error can't find the container with id 945d035868edbd25af91f84a6e45f99f95325c055819806eadf4edc2b915dcc3
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.730489 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd16577a7_7253_4f86_917a_f398d30ef959.slice/crio-e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7.scope WatchSource:0}: Error finding container e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7: Status 404 returned error can't find the container with id e34fb334f2d20cc907c3d93f9280ad2977c813e14323f1d350147f0c75df5ad7
Jan 23 07:17:56 crc kubenswrapper[5102]: W0123 07:17:56.733751 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd16577a7_7253_4f86_917a_f398d30ef959.slice/crio-eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c.scope WatchSource:0}: Error finding container eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c: Status 404 returned error can't find the container with id eeee6e259a5fb16d4254975b622d666c5e80309f694ba8e28ec7b619615d951c
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.154151 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.198725 5102 generic.go:334] "Generic (PLEG): container finished" podID="18efcf1f-9c02-45d3-bf31-455615a550fc" containerID="c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef" exitCode=137
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.199261 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0"
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.199647 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"18efcf1f-9c02-45d3-bf31-455615a550fc","Type":"ContainerDied","Data":"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"}
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.199761 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"18efcf1f-9c02-45d3-bf31-455615a550fc","Type":"ContainerDied","Data":"779a566c38935fa2eed184bd5cb5896833cad0b0429121fe578c8d8cbc2414f5"}
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.199869 5102 scope.go:117] "RemoveContainer" containerID="c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.214506 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0"
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.249843 5102 scope.go:117] "RemoveContainer" containerID="c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"
Jan 23 07:17:57 crc kubenswrapper[5102]: E0123 07:17:57.254937 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef\": container with ID starting with c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef not found: ID does not exist" containerID="c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"
Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.255261 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef"} err="failed to get container status \"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef\": rpc error: code = NotFound desc = could not find container \"c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef\": container with ID starting with c156a2494ea0bdf872c48777cb2f276001e30c1811ac6c5f47d431fe5cea1bef not found: ID does not exist"
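[editor's note] The "rpc error: code = NotFound" pair above is another benign race: the kubelet asks the CRI runtime (CRI-O here) for a container it wants to remove, but the runtime has already pruned it, so the gRPC call fails with NotFound and there is nothing left to do. A minimal sketch of how a CRI client can classify that error via its gRPC status code; grpcStatusErr is a hypothetical stand-in shaped like the runtime response in the log:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Hypothetical error, mirroring the ContainerStatus failure logged above.
	grpcStatusErr := status.Error(codes.NotFound,
		`could not find container "c156a249...": ID does not exist`)

	// NotFound on a delete/status path means the work is already done.
	if status.Code(grpcStatusErr) == codes.NotFound {
		fmt.Println("container already gone; treating removal as complete")
	}
}
```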
"kubernetes.io/projected/18efcf1f-9c02-45d3-bf31-455615a550fc-kube-api-access-6tg7s" (OuterVolumeSpecName: "kube-api-access-6tg7s") pod "18efcf1f-9c02-45d3-bf31-455615a550fc" (UID: "18efcf1f-9c02-45d3-bf31-455615a550fc"). InnerVolumeSpecName "kube-api-access-6tg7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.331294 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "18efcf1f-9c02-45d3-bf31-455615a550fc" (UID: "18efcf1f-9c02-45d3-bf31-455615a550fc"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.332351 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-config-data" (OuterVolumeSpecName: "config-data") pod "18efcf1f-9c02-45d3-bf31-455615a550fc" (UID: "18efcf1f-9c02-45d3-bf31-455615a550fc"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.403907 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.403953 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/18efcf1f-9c02-45d3-bf31-455615a550fc-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.403971 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tg7s\" (UniqueName: \"kubernetes.io/projected/18efcf1f-9c02-45d3-bf31-455615a550fc-kube-api-access-6tg7s\") on node \"crc\" DevicePath \"\"" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.546686 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.558559 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.571490 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:57 crc kubenswrapper[5102]: E0123 07:17:57.572289 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="18efcf1f-9c02-45d3-bf31-455615a550fc" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.572319 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="18efcf1f-9c02-45d3-bf31-455615a550fc" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.572668 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="18efcf1f-9c02-45d3-bf31-455615a550fc" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.573748 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.575882 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-public-svc" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.576941 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-novncproxy-config-data" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.577147 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-novncproxy-cell1-vencrypt" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.584451 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.607999 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="18efcf1f-9c02-45d3-bf31-455615a550fc" path="/var/lib/kubelet/pods/18efcf1f-9c02-45d3-bf31-455615a550fc/volumes" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.708926 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.709037 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.709078 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.709120 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lqtnb\" (UniqueName: \"kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.709145 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.811161 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.811209 5102 reconciler_common.go:218] 
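[editor's note] The "Caches populated for *v1.Secret" lines above are the kubelet's reflectors syncing a local watch cache for each Secret a pod's volumes reference before mounting them. A rough stand-in for that mechanism using client-go's shared informers (assumes k8s.io/client-go and in-cluster credentials; this is illustrative, not kubelet code, which watches individual objects rather than a whole namespace):

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Watch Secrets in the openstack namespace and wait for the first sync,
	// which is the moment the reflector would log "Caches populated".
	factory := informers.NewSharedInformerFactoryWithOptions(
		client, 10*time.Minute, informers.WithNamespace("openstack"))
	secrets := factory.Core().V1().Secrets().Informer()

	stop := make(chan struct{})
	defer close(stop)
	factory.Start(stop)
	cache.WaitForCacheSync(stop, secrets.HasSynced)
	fmt.Println("secret cache synced")
}
```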
"operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.811247 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lqtnb\" (UniqueName: \"kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.811266 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.811368 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.815259 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.817097 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-nova-novncproxy-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.820063 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.823068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.826655 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lqtnb\" (UniqueName: \"kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb\") pod \"nova-cell1-novncproxy-0\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:57 crc kubenswrapper[5102]: I0123 07:17:57.895071 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:17:58 crc kubenswrapper[5102]: W0123 07:17:58.343041 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd9a35726_d2a8_4175_9398_2f49e4598f63.slice/crio-7da464590a5dadf7254d54fa366db75e0fa8cad58465651d54c77255d00fa00c WatchSource:0}: Error finding container 7da464590a5dadf7254d54fa366db75e0fa8cad58465651d54c77255d00fa00c: Status 404 returned error can't find the container with id 7da464590a5dadf7254d54fa366db75e0fa8cad58465651d54c77255d00fa00c Jan 23 07:17:58 crc kubenswrapper[5102]: I0123 07:17:58.345158 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:17:59 crc kubenswrapper[5102]: I0123 07:17:59.227173 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d9a35726-d2a8-4175-9398-2f49e4598f63","Type":"ContainerStarted","Data":"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99"} Jan 23 07:17:59 crc kubenswrapper[5102]: I0123 07:17:59.227281 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d9a35726-d2a8-4175-9398-2f49e4598f63","Type":"ContainerStarted","Data":"7da464590a5dadf7254d54fa366db75e0fa8cad58465651d54c77255d00fa00c"} Jan 23 07:17:59 crc kubenswrapper[5102]: I0123 07:17:59.250812 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-novncproxy-0" podStartSLOduration=2.250784651 podStartE2EDuration="2.250784651s" podCreationTimestamp="2026-01-23 07:17:57 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:17:59.248082406 +0000 UTC m=+1430.068431441" watchObservedRunningTime="2026-01-23 07:17:59.250784651 +0000 UTC m=+1430.071133666" Jan 23 07:18:00 crc kubenswrapper[5102]: I0123 07:18:00.128900 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 23 07:18:00 crc kubenswrapper[5102]: I0123 07:18:00.488684 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 07:18:00 crc kubenswrapper[5102]: I0123 07:18:00.489427 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 07:18:00 crc kubenswrapper[5102]: I0123 07:18:00.489759 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 07:18:00 crc kubenswrapper[5102]: I0123 07:18:00.494958 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.291605 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.295335 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.566949 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.568763 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.585675 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.729889 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cpz7h\" (UniqueName: \"kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.729977 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.730086 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.730116 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.730147 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.730228 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832306 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832409 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cpz7h\" (UniqueName: \"kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832449 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832496 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832516 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.832562 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.833571 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.833833 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.833869 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.834025 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.834065 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.863735 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cpz7h\" (UniqueName: 
\"kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h\") pod \"dnsmasq-dns-fcd6f8f8f-9fbct\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") " pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:01 crc kubenswrapper[5102]: I0123 07:18:01.892912 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:02 crc kubenswrapper[5102]: I0123 07:18:02.351794 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:18:02 crc kubenswrapper[5102]: I0123 07:18:02.895641 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:18:03 crc kubenswrapper[5102]: I0123 07:18:03.310325 5102 generic.go:334] "Generic (PLEG): container finished" podID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerID="06c0a7654873431b095e5493274efbb96eff7c0aed02ef4432705c549fc8af79" exitCode=0 Jan 23 07:18:03 crc kubenswrapper[5102]: I0123 07:18:03.310441 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" event={"ID":"241a02f1-ca6d-4c3c-b635-2156947f47c4","Type":"ContainerDied","Data":"06c0a7654873431b095e5493274efbb96eff7c0aed02ef4432705c549fc8af79"} Jan 23 07:18:03 crc kubenswrapper[5102]: I0123 07:18:03.310490 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" event={"ID":"241a02f1-ca6d-4c3c-b635-2156947f47c4","Type":"ContainerStarted","Data":"0004288105d602bcb46a4b5f4689fd556ee0425c687d50be51cae3c640596ed8"} Jan 23 07:18:03 crc kubenswrapper[5102]: I0123 07:18:03.928358 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.328582 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" event={"ID":"241a02f1-ca6d-4c3c-b635-2156947f47c4","Type":"ContainerStarted","Data":"f4d5919866b49897b78006ffb1208e46c4edbce085f06eb3868680b3cfad3178"} Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.328768 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-log" containerID="cri-o://94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624" gracePeriod=30 Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.328916 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-api" containerID="cri-o://4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94" gracePeriod=30 Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.329527 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.342054 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.342825 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-central-agent" containerID="cri-o://b59fb2f36892c8b6fab1579f8ab5cd8c2a9f8af79555a1e6562cbe776526632e" gracePeriod=30 Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.343035 5102 kuberuntime_container.go:808] "Killing 
Jan 23 07:18:04 crc kubenswrapper[5102]: I0123 07:18:04.360689 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" podStartSLOduration=3.36066687 podStartE2EDuration="3.36066687s" podCreationTimestamp="2026-01-23 07:18:01 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:04.356723096 +0000 UTC m=+1435.177072071" watchObservedRunningTime="2026-01-23 07:18:04.36066687 +0000 UTC m=+1435.181015845"
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340853 5102 generic.go:334] "Generic (PLEG): container finished" podID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerID="ea2e31c884349bfdd80de491510d5a2078e9cdcd5e7a28ecb3e0083a67bc37ca" exitCode=0
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340887 5102 generic.go:334] "Generic (PLEG): container finished" podID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerID="011f058fbe518801efdd080d127e821f519b48af0ea6779adcfe136e1589979c" exitCode=2
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340895 5102 generic.go:334] "Generic (PLEG): container finished" podID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerID="b59fb2f36892c8b6fab1579f8ab5cd8c2a9f8af79555a1e6562cbe776526632e" exitCode=0
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340909 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerDied","Data":"ea2e31c884349bfdd80de491510d5a2078e9cdcd5e7a28ecb3e0083a67bc37ca"}
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340964 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerDied","Data":"011f058fbe518801efdd080d127e821f519b48af0ea6779adcfe136e1589979c"}
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.340979 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerDied","Data":"b59fb2f36892c8b6fab1579f8ab5cd8c2a9f8af79555a1e6562cbe776526632e"}
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.344821 5102 generic.go:334] "Generic (PLEG): container finished" podID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerID="94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624" exitCode=143
Jan 23 07:18:05 crc kubenswrapper[5102]: I0123 07:18:05.344902 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerDied","Data":"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624"}
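[editor's note] The exitCode values in the PLEG entries follow the usual 128+signal convention: 143 is SIGTERM (128+15, the clean result of the graceful kill above), 137 is SIGKILL (128+9, seen earlier for nova-cell1-novncproxy-0 when the grace period was exhausted or the kill was forced), and values below 128 (0, 2, ...) are ordinary process exit statuses. A minimal decoder for the codes seen in this log:

```go
package main

import (
	"fmt"
	"syscall"
)

// describe maps a container exit code to its meaning under the
// 128+signal convention used for signal-terminated processes.
func describe(exitCode int) string {
	if exitCode > 128 {
		sig := syscall.Signal(exitCode - 128)
		return fmt.Sprintf("killed by signal %d (%s)", int(sig), sig)
	}
	return fmt.Sprintf("exited with status %d", exitCode)
}

func main() {
	for _, code := range []int{0, 2, 137, 143} { // values seen in this log
		fmt.Printf("exitCode=%d -> %s\n", code, describe(code))
	}
}
```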
pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerDied","Data":"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624"} Jan 23 07:18:07 crc kubenswrapper[5102]: I0123 07:18:07.895560 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:18:07 crc kubenswrapper[5102]: I0123 07:18:07.924357 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.016477 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.089663 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs\") pod \"f61cac79-3f5c-4793-b3e2-57ff97f40880\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.089834 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data\") pod \"f61cac79-3f5c-4793-b3e2-57ff97f40880\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.090735 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle\") pod \"f61cac79-3f5c-4793-b3e2-57ff97f40880\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.090818 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhg24\" (UniqueName: \"kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24\") pod \"f61cac79-3f5c-4793-b3e2-57ff97f40880\" (UID: \"f61cac79-3f5c-4793-b3e2-57ff97f40880\") " Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.091285 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs" (OuterVolumeSpecName: "logs") pod "f61cac79-3f5c-4793-b3e2-57ff97f40880" (UID: "f61cac79-3f5c-4793-b3e2-57ff97f40880"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.098630 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24" (OuterVolumeSpecName: "kube-api-access-xhg24") pod "f61cac79-3f5c-4793-b3e2-57ff97f40880" (UID: "f61cac79-3f5c-4793-b3e2-57ff97f40880"). InnerVolumeSpecName "kube-api-access-xhg24". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.122301 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f61cac79-3f5c-4793-b3e2-57ff97f40880" (UID: "f61cac79-3f5c-4793-b3e2-57ff97f40880"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.124259 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data" (OuterVolumeSpecName: "config-data") pod "f61cac79-3f5c-4793-b3e2-57ff97f40880" (UID: "f61cac79-3f5c-4793-b3e2-57ff97f40880"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.193694 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.193758 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f61cac79-3f5c-4793-b3e2-57ff97f40880-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.193775 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhg24\" (UniqueName: \"kubernetes.io/projected/f61cac79-3f5c-4793-b3e2-57ff97f40880-kube-api-access-xhg24\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.193811 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/f61cac79-3f5c-4793-b3e2-57ff97f40880-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.266000 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.266265 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="9d9ac8cd-f88c-4828-a66b-19689f94104e" containerName="kube-state-metrics" containerID="cri-o://f284c0397c0ecb9cee83829e220db8548fa8227c3f98e3f6c35a60e6abe2bd5e" gracePeriod=30 Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.405155 5102 generic.go:334] "Generic (PLEG): container finished" podID="9d9ac8cd-f88c-4828-a66b-19689f94104e" containerID="f284c0397c0ecb9cee83829e220db8548fa8227c3f98e3f6c35a60e6abe2bd5e" exitCode=2 Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.405248 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9ac8cd-f88c-4828-a66b-19689f94104e","Type":"ContainerDied","Data":"f284c0397c0ecb9cee83829e220db8548fa8227c3f98e3f6c35a60e6abe2bd5e"} Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.411458 5102 generic.go:334] "Generic (PLEG): container finished" podID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerID="4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94" exitCode=0 Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.411561 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerDied","Data":"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94"} Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.411605 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"f61cac79-3f5c-4793-b3e2-57ff97f40880","Type":"ContainerDied","Data":"073393df643f6fb36776cea9427c6bbbda54e6fc7405b9eaaf6ec538ec3f32fa"} Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.411527 5102 util.go:48] "No ready 
sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.411628 5102 scope.go:117] "RemoveContainer" containerID="4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.441176 5102 scope.go:117] "RemoveContainer" containerID="94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.442630 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.521753 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.530717 5102 scope.go:117] "RemoveContainer" containerID="4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94" Jan 23 07:18:08 crc kubenswrapper[5102]: E0123 07:18:08.531604 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94\": container with ID starting with 4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94 not found: ID does not exist" containerID="4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.531635 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94"} err="failed to get container status \"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94\": rpc error: code = NotFound desc = could not find container \"4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94\": container with ID starting with 4426c4aa79667149d98f29ee5a80f2efe65c43a79a435f4d4e1c7fd032c30a94 not found: ID does not exist" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.531659 5102 scope.go:117] "RemoveContainer" containerID="94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.531712 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:08 crc kubenswrapper[5102]: E0123 07:18:08.532897 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624\": container with ID starting with 94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624 not found: ID does not exist" containerID="94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.532924 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624"} err="failed to get container status \"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624\": rpc error: code = NotFound desc = could not find container \"94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624\": container with ID starting with 94c418daf8c7dbafdc40ea51cf84d64acfd483652d094897d8d871c8b8d9e624 not found: ID does not exist" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.562599 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:08 crc 
kubenswrapper[5102]: E0123 07:18:08.562978 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-api" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.562990 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-api" Jan 23 07:18:08 crc kubenswrapper[5102]: E0123 07:18:08.563002 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-log" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.563008 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-log" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.563175 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-api" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.563187 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" containerName="nova-api-log" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.564115 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.565916 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.591121 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.591469 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.591627 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623012 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623098 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623124 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623183 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623235 5102 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.623257 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vjvfv\" (UniqueName: \"kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727168 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727283 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727323 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727345 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vjvfv\" (UniqueName: \"kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727389 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727463 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.727965 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.735516 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.736428 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.737086 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.738653 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.755482 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vjvfv\" (UniqueName: \"kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv\") pod \"nova-api-0\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.794686 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-cell-mapping-hzkhv"] Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.796320 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.801668 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-config-data" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.801796 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-manage-scripts" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.806666 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-hzkhv"] Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.930749 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.931147 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt8tr\" (UniqueName: \"kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.931184 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.931250 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: 
\"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.949279 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:08 crc kubenswrapper[5102]: I0123 07:18:08.973828 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.032106 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jsp8g\" (UniqueName: \"kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g\") pod \"9d9ac8cd-f88c-4828-a66b-19689f94104e\" (UID: \"9d9ac8cd-f88c-4828-a66b-19689f94104e\") " Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.032619 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt8tr\" (UniqueName: \"kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.032653 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.032705 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.032755 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.038022 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.038804 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.039504 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g" (OuterVolumeSpecName: "kube-api-access-jsp8g") pod "9d9ac8cd-f88c-4828-a66b-19689f94104e" (UID: 
"9d9ac8cd-f88c-4828-a66b-19689f94104e"). InnerVolumeSpecName "kube-api-access-jsp8g". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.048015 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.054141 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt8tr\" (UniqueName: \"kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr\") pod \"nova-cell1-cell-mapping-hzkhv\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.135022 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jsp8g\" (UniqueName: \"kubernetes.io/projected/9d9ac8cd-f88c-4828-a66b-19689f94104e-kube-api-access-jsp8g\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.142629 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.423978 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.423981 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"9d9ac8cd-f88c-4828-a66b-19689f94104e","Type":"ContainerDied","Data":"54f42f218385ae6b09490b2941b329da240aaea572513d1cb51a3706b7ab70ad"} Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.424566 5102 scope.go:117] "RemoveContainer" containerID="f284c0397c0ecb9cee83829e220db8548fa8227c3f98e3f6c35a60e6abe2bd5e" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.461063 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.467682 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.477822 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.490278 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:09 crc kubenswrapper[5102]: E0123 07:18:09.490918 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9d9ac8cd-f88c-4828-a66b-19689f94104e" containerName="kube-state-metrics" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.490937 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="9d9ac8cd-f88c-4828-a66b-19689f94104e" containerName="kube-state-metrics" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.491185 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="9d9ac8cd-f88c-4828-a66b-19689f94104e" containerName="kube-state-metrics" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.492398 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.498933 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.499142 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.500265 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:09 crc kubenswrapper[5102]: W0123 07:18:09.501409 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod3ed35bcd_b42a_4517_948c_78b730416518.slice/crio-1b89818afb7d78130a58aef2435d8641d9cfa1b4171f1fce2587e3dfd6710e25 WatchSource:0}: Error finding container 1b89818afb7d78130a58aef2435d8641d9cfa1b4171f1fce2587e3dfd6710e25: Status 404 returned error can't find the container with id 1b89818afb7d78130a58aef2435d8641d9cfa1b4171f1fce2587e3dfd6710e25 Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.541207 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.541664 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.541790 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.541812 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw455\" (UniqueName: \"kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.624418 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d9ac8cd-f88c-4828-a66b-19689f94104e" path="/var/lib/kubelet/pods/9d9ac8cd-f88c-4828-a66b-19689f94104e/volumes" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.625037 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f61cac79-3f5c-4793-b3e2-57ff97f40880" path="/var/lib/kubelet/pods/f61cac79-3f5c-4793-b3e2-57ff97f40880/volumes" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.627739 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-cell-mapping-hzkhv"] Jan 23 07:18:09 crc kubenswrapper[5102]: W0123 07:18:09.627890 5102 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod195d99d7_c8be_42f4_8f65_3209be1334b5.slice/crio-62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e WatchSource:0}: Error finding container 62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e: Status 404 returned error can't find the container with id 62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.645333 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.645407 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.645457 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.645477 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw455\" (UniqueName: \"kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.650574 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.652143 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-kube-state-metrics-svc" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.652198 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"kube-state-metrics-tls-config" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.659792 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.661178 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.662027 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw455\" (UniqueName: \"kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455\") pod \"kube-state-metrics-0\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " pod="openstack/kube-state-metrics-0" Jan 23 07:18:09 crc kubenswrapper[5102]: I0123 07:18:09.817248 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.412402 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.419155 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.448350 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerStarted","Data":"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.448663 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerStarted","Data":"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.448731 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerStarted","Data":"1b89818afb7d78130a58aef2435d8641d9cfa1b4171f1fce2587e3dfd6710e25"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.451725 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hzkhv" event={"ID":"195d99d7-c8be-42f4-8f65-3209be1334b5","Type":"ContainerStarted","Data":"fb0523159313e01a611a22443a2546781a5f75a38f16084d9a077df4b793c4bb"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.451879 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hzkhv" event={"ID":"195d99d7-c8be-42f4-8f65-3209be1334b5","Type":"ContainerStarted","Data":"62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.454787 5102 generic.go:334] "Generic (PLEG): container finished" podID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerID="5e53a68a7de19de993fc494afb2a5deb3586933db407c709b7e878f663584cdd" exitCode=0 Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.454841 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerDied","Data":"5e53a68a7de19de993fc494afb2a5deb3586933db407c709b7e878f663584cdd"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.456168 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ddb8da53-e17d-4c8d-a625-0d241d2caafd","Type":"ContainerStarted","Data":"36a88a72098c84ef4ea7189f649900699e4b2e3ab7c027f93d8f04112c60d1ec"} Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.476899 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=2.476882786 podStartE2EDuration="2.476882786s" podCreationTimestamp="2026-01-23 07:18:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:10.468993049 +0000 UTC m=+1441.289342034" watchObservedRunningTime="2026-01-23 07:18:10.476882786 +0000 UTC m=+1441.297231761" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.477502 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.488322 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-cell1-cell-mapping-hzkhv" podStartSLOduration=2.488308023 podStartE2EDuration="2.488308023s" podCreationTimestamp="2026-01-23 07:18:08 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:10.483247095 +0000 UTC m=+1441.303596070" watchObservedRunningTime="2026-01-23 07:18:10.488308023 +0000 UTC m=+1441.308656998" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.649475 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gt6jz\" (UniqueName: \"kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.649817 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.649902 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650069 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650138 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650207 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650228 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd\") pod \"f87cc0fb-642a-4af4-b080-049a7a29440d\" (UID: \"f87cc0fb-642a-4af4-b080-049a7a29440d\") " Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650369 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for 
volume "kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650710 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.650716 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.656080 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts" (OuterVolumeSpecName: "scripts") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.672239 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz" (OuterVolumeSpecName: "kube-api-access-gt6jz") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "kube-api-access-gt6jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.694338 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.744281 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.753097 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.753137 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.753150 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.753166 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/f87cc0fb-642a-4af4-b080-049a7a29440d-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.753178 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gt6jz\" (UniqueName: \"kubernetes.io/projected/f87cc0fb-642a-4af4-b080-049a7a29440d-kube-api-access-gt6jz\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.768832 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data" (OuterVolumeSpecName: "config-data") pod "f87cc0fb-642a-4af4-b080-049a7a29440d" (UID: "f87cc0fb-642a-4af4-b080-049a7a29440d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:10 crc kubenswrapper[5102]: I0123 07:18:10.855229 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/f87cc0fb-642a-4af4-b080-049a7a29440d-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.469159 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"f87cc0fb-642a-4af4-b080-049a7a29440d","Type":"ContainerDied","Data":"4a4eb35f51f45ca4111cdd5ab63c59541e27f8708aa2a06ee04a6c6fe60a7f6d"} Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.469245 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.469525 5102 scope.go:117] "RemoveContainer" containerID="ea2e31c884349bfdd80de491510d5a2078e9cdcd5e7a28ecb3e0083a67bc37ca" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.479358 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ddb8da53-e17d-4c8d-a625-0d241d2caafd","Type":"ContainerStarted","Data":"60739a227d7d472e20c6d49976fdefef7c5c808195298c9c502a74f3226d9f61"} Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.480771 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/kube-state-metrics-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.504263 5102 scope.go:117] "RemoveContainer" containerID="011f058fbe518801efdd080d127e821f519b48af0ea6779adcfe136e1589979c" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.526933 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/kube-state-metrics-0" podStartSLOduration=2.139196103 podStartE2EDuration="2.526906062s" podCreationTimestamp="2026-01-23 07:18:09 +0000 UTC" firstStartedPulling="2026-01-23 07:18:10.418945132 +0000 UTC m=+1441.239294097" lastFinishedPulling="2026-01-23 07:18:10.806655081 +0000 UTC m=+1441.627004056" observedRunningTime="2026-01-23 07:18:11.501719853 +0000 UTC m=+1442.322068828" watchObservedRunningTime="2026-01-23 07:18:11.526906062 +0000 UTC m=+1442.347255057" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.543795 5102 scope.go:117] "RemoveContainer" containerID="5e53a68a7de19de993fc494afb2a5deb3586933db407c709b7e878f663584cdd" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.546502 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.563016 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.578631 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:11 crc kubenswrapper[5102]: E0123 07:18:11.579066 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-notification-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579084 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-notification-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: E0123 07:18:11.579105 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="proxy-httpd" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579114 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="proxy-httpd" Jan 23 07:18:11 crc kubenswrapper[5102]: E0123 07:18:11.579127 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-central-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579134 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-central-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: E0123 07:18:11.579158 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="sg-core" 
Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579164 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="sg-core" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579338 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-central-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579353 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="ceilometer-notification-agent" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579371 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="proxy-httpd" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.579388 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" containerName="sg-core" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.581894 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.584951 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-config-data" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.585108 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-ceilometer-internal-svc" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.585373 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"ceilometer-scripts" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.690101 5102 scope.go:117] "RemoveContainer" containerID="b59fb2f36892c8b6fab1579f8ab5cd8c2a9f8af79555a1e6562cbe776526632e" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.719271 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f87cc0fb-642a-4af4-b080-049a7a29440d" path="/var/lib/kubelet/pods/f87cc0fb-642a-4af4-b080-049a7a29440d/volumes" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.720337 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771307 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771375 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771398 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771838 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-xndbv\" (UniqueName: \"kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771936 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.771963 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.772099 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.772157 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.873914 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.873961 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874039 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xndbv\" (UniqueName: \"kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874067 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874086 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 
07:18:11.874134 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874165 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874183 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874600 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.874610 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.879867 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.879881 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.881315 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.883776 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.886075 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.892645 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for 
volume \"kube-api-access-xndbv\" (UniqueName: \"kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv\") pod \"ceilometer-0\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " pod="openstack/ceilometer-0" Jan 23 07:18:11 crc kubenswrapper[5102]: I0123 07:18:11.894652 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.001839 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.024073 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.024385 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="dnsmasq-dns" containerID="cri-o://e8f705bb6c2768891290fe7fca4e7d7c19573450ece317925fe843b3d7ba3b1f" gracePeriod=10 Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.504752 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.508176 5102 generic.go:334] "Generic (PLEG): container finished" podID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerID="e8f705bb6c2768891290fe7fca4e7d7c19573450ece317925fe843b3d7ba3b1f" exitCode=0 Jan 23 07:18:12 crc kubenswrapper[5102]: I0123 07:18:12.509151 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" event={"ID":"e298e269-6676-4a09-8bb3-7fb0ad38b62e","Type":"ContainerDied","Data":"e8f705bb6c2768891290fe7fca4e7d7c19573450ece317925fe843b3d7ba3b1f"} Jan 23 07:18:12 crc kubenswrapper[5102]: W0123 07:18:12.510297 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode3bed276_ffe5_4d67_b34c_8e5a8b6c61f8.slice/crio-c42a481bce5f05cab46712eff369c5a893db541a095b4db08fc1d5e3e516ab58 WatchSource:0}: Error finding container c42a481bce5f05cab46712eff369c5a893db541a095b4db08fc1d5e3e516ab58: Status 404 returned error can't find the container with id c42a481bce5f05cab46712eff369c5a893db541a095b4db08fc1d5e3e516ab58 Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.183603 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.252222 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.252897 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.253179 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.253204 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.253236 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.253299 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jjd5l\" (UniqueName: \"kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l\") pod \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\" (UID: \"e298e269-6676-4a09-8bb3-7fb0ad38b62e\") " Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.272592 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l" (OuterVolumeSpecName: "kube-api-access-jjd5l") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "kube-api-access-jjd5l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.347276 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.348559 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config" (OuterVolumeSpecName: "config") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.353930 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "dns-svc". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.354770 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.354794 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.354805 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jjd5l\" (UniqueName: \"kubernetes.io/projected/e298e269-6676-4a09-8bb3-7fb0ad38b62e-kube-api-access-jjd5l\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.354814 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.356358 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.369015 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "e298e269-6676-4a09-8bb3-7fb0ad38b62e" (UID: "e298e269-6676-4a09-8bb3-7fb0ad38b62e"). InnerVolumeSpecName "ovsdbserver-nb". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.456309 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.456351 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/e298e269-6676-4a09-8bb3-7fb0ad38b62e-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.517499 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerStarted","Data":"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc"} Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.517611 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerStarted","Data":"c42a481bce5f05cab46712eff369c5a893db541a095b4db08fc1d5e3e516ab58"} Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.520083 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.524776 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-647df7b8c5-8cnll" event={"ID":"e298e269-6676-4a09-8bb3-7fb0ad38b62e","Type":"ContainerDied","Data":"aa9b7bc6b516c40ca49d565a8331c7bfb2b1673b96e739af53910207402e5b69"} Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.524866 5102 scope.go:117] "RemoveContainer" containerID="e8f705bb6c2768891290fe7fca4e7d7c19573450ece317925fe843b3d7ba3b1f" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.550235 5102 scope.go:117] "RemoveContainer" containerID="00587c5fdc2f7fa4ac7a71ae4b0bd1f265b5b9ada9f53957d95859e9dddd5fd2" Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.559483 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.567360 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-647df7b8c5-8cnll"] Jan 23 07:18:13 crc kubenswrapper[5102]: I0123 07:18:13.621334 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" path="/var/lib/kubelet/pods/e298e269-6676-4a09-8bb3-7fb0ad38b62e/volumes" Jan 23 07:18:14 crc kubenswrapper[5102]: I0123 07:18:14.533986 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerStarted","Data":"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222"} Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.009389 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:15 crc kubenswrapper[5102]: E0123 07:18:15.010095 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="init" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.010111 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="init" Jan 23 07:18:15 crc kubenswrapper[5102]: E0123 07:18:15.010134 5102 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="dnsmasq-dns" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.010140 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="dnsmasq-dns" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.010388 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e298e269-6676-4a09-8bb3-7fb0ad38b62e" containerName="dnsmasq-dns" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.011910 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.030433 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.095724 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pg8bc\" (UniqueName: \"kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.095801 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.095876 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.197740 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pg8bc\" (UniqueName: \"kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.197779 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.197807 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.198316 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content\") pod 
\"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.198374 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.221112 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pg8bc\" (UniqueName: \"kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc\") pod \"redhat-operators-csfzh\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.337681 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.550732 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerStarted","Data":"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8"} Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.553877 5102 generic.go:334] "Generic (PLEG): container finished" podID="195d99d7-c8be-42f4-8f65-3209be1334b5" containerID="fb0523159313e01a611a22443a2546781a5f75a38f16084d9a077df4b793c4bb" exitCode=0 Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.553966 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hzkhv" event={"ID":"195d99d7-c8be-42f4-8f65-3209be1334b5","Type":"ContainerDied","Data":"fb0523159313e01a611a22443a2546781a5f75a38f16084d9a077df4b793c4bb"} Jan 23 07:18:15 crc kubenswrapper[5102]: I0123 07:18:15.832456 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:15 crc kubenswrapper[5102]: W0123 07:18:15.861533 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbee350bf_58af_4e41_ad0e_2beeefb75a04.slice/crio-798c65fa2c39d29095896e678ec3382da3752882484c7661f9d9304529e6f7dd WatchSource:0}: Error finding container 798c65fa2c39d29095896e678ec3382da3752882484c7661f9d9304529e6f7dd: Status 404 returned error can't find the container with id 798c65fa2c39d29095896e678ec3382da3752882484c7661f9d9304529e6f7dd Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.562972 5102 generic.go:334] "Generic (PLEG): container finished" podID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerID="db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0" exitCode=0 Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.563048 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerDied","Data":"db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0"} Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.563237 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" 
event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerStarted","Data":"798c65fa2c39d29095896e678ec3382da3752882484c7661f9d9304529e6f7dd"} Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.565841 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerStarted","Data":"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919"} Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.566559 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/ceilometer-0" Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.610038 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/ceilometer-0" podStartSLOduration=2.545131133 podStartE2EDuration="5.610023353s" podCreationTimestamp="2026-01-23 07:18:11 +0000 UTC" firstStartedPulling="2026-01-23 07:18:12.512234602 +0000 UTC m=+1443.332583577" lastFinishedPulling="2026-01-23 07:18:15.577126812 +0000 UTC m=+1446.397475797" observedRunningTime="2026-01-23 07:18:16.60705352 +0000 UTC m=+1447.427402495" watchObservedRunningTime="2026-01-23 07:18:16.610023353 +0000 UTC m=+1447.430372328" Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.768421 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.768484 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:18:16 crc kubenswrapper[5102]: I0123 07:18:16.965720 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.032759 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle\") pod \"195d99d7-c8be-42f4-8f65-3209be1334b5\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.032916 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jt8tr\" (UniqueName: \"kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr\") pod \"195d99d7-c8be-42f4-8f65-3209be1334b5\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.032973 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts\") pod \"195d99d7-c8be-42f4-8f65-3209be1334b5\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.033010 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data\") pod \"195d99d7-c8be-42f4-8f65-3209be1334b5\" (UID: \"195d99d7-c8be-42f4-8f65-3209be1334b5\") " Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.040147 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr" (OuterVolumeSpecName: "kube-api-access-jt8tr") pod "195d99d7-c8be-42f4-8f65-3209be1334b5" (UID: "195d99d7-c8be-42f4-8f65-3209be1334b5"). InnerVolumeSpecName "kube-api-access-jt8tr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.040276 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts" (OuterVolumeSpecName: "scripts") pod "195d99d7-c8be-42f4-8f65-3209be1334b5" (UID: "195d99d7-c8be-42f4-8f65-3209be1334b5"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.065728 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data" (OuterVolumeSpecName: "config-data") pod "195d99d7-c8be-42f4-8f65-3209be1334b5" (UID: "195d99d7-c8be-42f4-8f65-3209be1334b5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.090271 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "195d99d7-c8be-42f4-8f65-3209be1334b5" (UID: "195d99d7-c8be-42f4-8f65-3209be1334b5"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.133909 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jt8tr\" (UniqueName: \"kubernetes.io/projected/195d99d7-c8be-42f4-8f65-3209be1334b5-kube-api-access-jt8tr\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.133937 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.133946 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.133958 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/195d99d7-c8be-42f4-8f65-3209be1334b5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.619097 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-cell-mapping-hzkhv" event={"ID":"195d99d7-c8be-42f4-8f65-3209be1334b5","Type":"ContainerDied","Data":"62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e"} Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.619343 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="62039f1a0865bddc8217d2a8bdc82299b38592a5967255ee48787040f2032a4e" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.619124 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-cell-mapping-hzkhv" Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.621632 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerStarted","Data":"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546"} Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.797219 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.798171 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-api" containerID="cri-o://7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0" gracePeriod=30 Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.798125 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-log" containerID="cri-o://53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984" gracePeriod=30 Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.811421 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.811685 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerName="nova-scheduler-scheduler" containerID="cri-o://1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" gracePeriod=30 Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 
07:18:17.859097 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.859670 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" containerID="cri-o://e3164f5181fac42e2f9df3dc2c85e26eb9932232e7c524bab82d3e9336cbeec0" gracePeriod=30 Jan 23 07:18:17 crc kubenswrapper[5102]: I0123 07:18:17.860112 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" containerID="cri-o://95fec98cbec410ddb48024a5e38a2637ac677c9305b0383d43b141ef308abb41" gracePeriod=30 Jan 23 07:18:18 crc kubenswrapper[5102]: I0123 07:18:18.635040 5102 generic.go:334] "Generic (PLEG): container finished" podID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerID="e3164f5181fac42e2f9df3dc2c85e26eb9932232e7c524bab82d3e9336cbeec0" exitCode=143 Jan 23 07:18:18 crc kubenswrapper[5102]: I0123 07:18:18.635354 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerDied","Data":"e3164f5181fac42e2f9df3dc2c85e26eb9932232e7c524bab82d3e9336cbeec0"} Jan 23 07:18:18 crc kubenswrapper[5102]: I0123 07:18:18.638210 5102 generic.go:334] "Generic (PLEG): container finished" podID="3ed35bcd-b42a-4517-948c-78b730416518" containerID="53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984" exitCode=143 Jan 23 07:18:18 crc kubenswrapper[5102]: I0123 07:18:18.638288 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerDied","Data":"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984"} Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.215440 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.334350 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.334448 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vjvfv\" (UniqueName: \"kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.335309 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.335366 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.335396 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.335475 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs\") pod \"3ed35bcd-b42a-4517-948c-78b730416518\" (UID: \"3ed35bcd-b42a-4517-948c-78b730416518\") " Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.336416 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs" (OuterVolumeSpecName: "logs") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.342711 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv" (OuterVolumeSpecName: "kube-api-access-vjvfv") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "kube-api-access-vjvfv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.374526 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data" (OuterVolumeSpecName: "config-data") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.384154 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.392184 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.418853 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "3ed35bcd-b42a-4517-948c-78b730416518" (UID: "3ed35bcd-b42a-4517-948c-78b730416518"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.437924 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.437971 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.437984 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vjvfv\" (UniqueName: \"kubernetes.io/projected/3ed35bcd-b42a-4517-948c-78b730416518-kube-api-access-vjvfv\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.437998 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/3ed35bcd-b42a-4517-948c-78b730416518-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.438011 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.438024 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/3ed35bcd-b42a-4517-948c-78b730416518-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.492884 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.494263 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command 
error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.495420 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.495527 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerName="nova-scheduler-scheduler" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.665952 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.666011 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerDied","Data":"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0"} Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.665903 5102 generic.go:334] "Generic (PLEG): container finished" podID="3ed35bcd-b42a-4517-948c-78b730416518" containerID="7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0" exitCode=0 Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.666127 5102 scope.go:117] "RemoveContainer" containerID="7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.666141 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"3ed35bcd-b42a-4517-948c-78b730416518","Type":"ContainerDied","Data":"1b89818afb7d78130a58aef2435d8641d9cfa1b4171f1fce2587e3dfd6710e25"} Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.723705 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.738682 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.752583 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.753092 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-log" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753108 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-log" Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.753140 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="195d99d7-c8be-42f4-8f65-3209be1334b5" containerName="nova-manage" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753149 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="195d99d7-c8be-42f4-8f65-3209be1334b5" containerName="nova-manage" Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.753167 5102 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-api" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753176 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-api" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753388 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-log" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753425 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="195d99d7-c8be-42f4-8f65-3209be1334b5" containerName="nova-manage" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.753442 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="3ed35bcd-b42a-4517-948c-78b730416518" containerName="nova-api-api" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.754866 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.763671 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-public-svc" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.764064 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-internal-svc" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.767867 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.768436 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-config-data" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.799650 5102 scope.go:117] "RemoveContainer" containerID="53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.821518 5102 scope.go:117] "RemoveContainer" containerID="7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0" Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.822042 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0\": container with ID starting with 7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0 not found: ID does not exist" containerID="7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.822086 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0"} err="failed to get container status \"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0\": rpc error: code = NotFound desc = could not find container \"7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0\": container with ID starting with 7431e220930406dc0fde03f58cea98d0e9907c5dae11082944610e28677846c0 not found: ID does not exist" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.822111 5102 scope.go:117] "RemoveContainer" containerID="53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984" Jan 23 07:18:19 crc kubenswrapper[5102]: E0123 07:18:19.822467 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984\": container with ID starting 
with 53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984 not found: ID does not exist" containerID="53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.822511 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984"} err="failed to get container status \"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984\": rpc error: code = NotFound desc = could not find container \"53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984\": container with ID starting with 53cbf653c7a4e3b24b504887c26d0e4d5187daab140acf24201bca653f11f984 not found: ID does not exist" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.829637 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/kube-state-metrics-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.946978 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.947044 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.947196 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.947230 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rz9gq\" (UniqueName: \"kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.947250 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:19 crc kubenswrapper[5102]: I0123 07:18:19.947272 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.049735 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc 
kubenswrapper[5102]: I0123 07:18:20.049803 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rz9gq\" (UniqueName: \"kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.049829 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.049857 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.049882 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.049932 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.051258 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.053982 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.056954 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.058058 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.061607 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.072014 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-rz9gq\" (UniqueName: \"kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq\") pod \"nova-api-0\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.084693 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.608767 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.693382 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerStarted","Data":"c206bf1e7550deabf28d51b5a89a13aaeff7642c2c78bcedc20b956283e451df"} Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.697027 5102 generic.go:334] "Generic (PLEG): container finished" podID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerID="e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546" exitCode=0 Jan 23 07:18:20 crc kubenswrapper[5102]: I0123 07:18:20.697445 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerDied","Data":"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546"} Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.275993 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": dial tcp 10.217.0.196:8775: connect: connection refused" Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.275996 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.196:8775/\": dial tcp 10.217.0.196:8775: connect: connection refused" Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.616472 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ed35bcd-b42a-4517-948c-78b730416518" path="/var/lib/kubelet/pods/3ed35bcd-b42a-4517-948c-78b730416518/volumes" Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.745637 5102 generic.go:334] "Generic (PLEG): container finished" podID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerID="1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" exitCode=0 Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.746051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f808f17-0e86-44de-a0d7-c326fe363e26","Type":"ContainerDied","Data":"1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30"} Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.751737 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerStarted","Data":"c00e7ef9f103d8dba5d16c6b74a391b96b09bc3b06eaa21681a7b180182aaf05"} Jan 23 07:18:21 crc kubenswrapper[5102]: I0123 07:18:21.754263 5102 generic.go:334] "Generic (PLEG): container finished" podID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerID="95fec98cbec410ddb48024a5e38a2637ac677c9305b0383d43b141ef308abb41" exitCode=0 Jan 23 07:18:21 crc 
kubenswrapper[5102]: I0123 07:18:21.754288 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerDied","Data":"95fec98cbec410ddb48024a5e38a2637ac677c9305b0383d43b141ef308abb41"} Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.026633 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.047819 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115059 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle\") pod \"1f808f17-0e86-44de-a0d7-c326fe363e26\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115247 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fj6mj\" (UniqueName: \"kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj\") pod \"b5e8976c-47bd-46ed-8b02-8001332ed939\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115289 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs\") pod \"b5e8976c-47bd-46ed-8b02-8001332ed939\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115343 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data\") pod \"b5e8976c-47bd-46ed-8b02-8001332ed939\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115402 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs\") pod \"b5e8976c-47bd-46ed-8b02-8001332ed939\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115433 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle\") pod \"b5e8976c-47bd-46ed-8b02-8001332ed939\" (UID: \"b5e8976c-47bd-46ed-8b02-8001332ed939\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115483 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4kh44\" (UniqueName: \"kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44\") pod \"1f808f17-0e86-44de-a0d7-c326fe363e26\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115586 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data\") pod \"1f808f17-0e86-44de-a0d7-c326fe363e26\" (UID: \"1f808f17-0e86-44de-a0d7-c326fe363e26\") " Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.115966 5102 
operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs" (OuterVolumeSpecName: "logs") pod "b5e8976c-47bd-46ed-8b02-8001332ed939" (UID: "b5e8976c-47bd-46ed-8b02-8001332ed939"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.116318 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/b5e8976c-47bd-46ed-8b02-8001332ed939-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.137102 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj" (OuterVolumeSpecName: "kube-api-access-fj6mj") pod "b5e8976c-47bd-46ed-8b02-8001332ed939" (UID: "b5e8976c-47bd-46ed-8b02-8001332ed939"). InnerVolumeSpecName "kube-api-access-fj6mj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.138797 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44" (OuterVolumeSpecName: "kube-api-access-4kh44") pod "1f808f17-0e86-44de-a0d7-c326fe363e26" (UID: "1f808f17-0e86-44de-a0d7-c326fe363e26"). InnerVolumeSpecName "kube-api-access-4kh44". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.174912 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "b5e8976c-47bd-46ed-8b02-8001332ed939" (UID: "b5e8976c-47bd-46ed-8b02-8001332ed939"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.189728 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data" (OuterVolumeSpecName: "config-data") pod "1f808f17-0e86-44de-a0d7-c326fe363e26" (UID: "1f808f17-0e86-44de-a0d7-c326fe363e26"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.206864 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data" (OuterVolumeSpecName: "config-data") pod "b5e8976c-47bd-46ed-8b02-8001332ed939" (UID: "b5e8976c-47bd-46ed-8b02-8001332ed939"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.210425 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1f808f17-0e86-44de-a0d7-c326fe363e26" (UID: "1f808f17-0e86-44de-a0d7-c326fe363e26"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217283 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217317 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217331 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4kh44\" (UniqueName: \"kubernetes.io/projected/1f808f17-0e86-44de-a0d7-c326fe363e26-kube-api-access-4kh44\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217342 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217355 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1f808f17-0e86-44de-a0d7-c326fe363e26-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.217371 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fj6mj\" (UniqueName: \"kubernetes.io/projected/b5e8976c-47bd-46ed-8b02-8001332ed939-kube-api-access-fj6mj\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.222767 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "b5e8976c-47bd-46ed-8b02-8001332ed939" (UID: "b5e8976c-47bd-46ed-8b02-8001332ed939"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.318855 5102 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/b5e8976c-47bd-46ed-8b02-8001332ed939-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.764293 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"1f808f17-0e86-44de-a0d7-c326fe363e26","Type":"ContainerDied","Data":"37526bcb1643f38bf8d24650f2ab0561a91dc640200e0c40ed66ddef910f3daa"} Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.765331 5102 scope.go:117] "RemoveContainer" containerID="1df7d29ce86e007b5ebb17d4d41793103370075afc5ea691e66c11604db91f30" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.765505 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.779973 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerStarted","Data":"f4d4bf4c2380f1096c71e372015b67fe76544c3993ebfdd80decba57527e35ae"} Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.782884 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"b5e8976c-47bd-46ed-8b02-8001332ed939","Type":"ContainerDied","Data":"481aba155621706cfcb1050e2128b2adea8c884a348dcbd70b3e71a5101e038f"} Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.782904 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.792605 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerStarted","Data":"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c"} Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.804868 5102 scope.go:117] "RemoveContainer" containerID="95fec98cbec410ddb48024a5e38a2637ac677c9305b0383d43b141ef308abb41" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.815398 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.832854 5102 scope.go:117] "RemoveContainer" containerID="e3164f5181fac42e2f9df3dc2c85e26eb9932232e7c524bab82d3e9336cbeec0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.846363 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.869975 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: E0123 07:18:22.870491 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870516 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" Jan 23 07:18:22 crc kubenswrapper[5102]: E0123 07:18:22.870562 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870572 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" Jan 23 07:18:22 crc kubenswrapper[5102]: E0123 07:18:22.870582 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerName="nova-scheduler-scheduler" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870591 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerName="nova-scheduler-scheduler" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870830 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-metadata" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870854 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" containerName="nova-scheduler-scheduler" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.870875 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" containerName="nova-metadata-log" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.871768 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.874611 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-scheduler-config-data" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.881388 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-api-0" podStartSLOduration=3.881364724 podStartE2EDuration="3.881364724s" podCreationTimestamp="2026-01-23 07:18:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:22.82500353 +0000 UTC m=+1453.645352505" watchObservedRunningTime="2026-01-23 07:18:22.881364724 +0000 UTC m=+1453.701713699" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.909761 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.918247 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-csfzh" podStartSLOduration=4.031450776 podStartE2EDuration="8.918228248s" podCreationTimestamp="2026-01-23 07:18:14 +0000 UTC" firstStartedPulling="2026-01-23 07:18:16.565143258 +0000 UTC m=+1447.385492233" lastFinishedPulling="2026-01-23 07:18:21.45192071 +0000 UTC m=+1452.272269705" observedRunningTime="2026-01-23 07:18:22.854886575 +0000 UTC m=+1453.675235570" watchObservedRunningTime="2026-01-23 07:18:22.918228248 +0000 UTC m=+1453.738577223" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.930601 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.932780 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.932982 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.933093 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zssbh\" (UniqueName: \"kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.942953 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.951050 5102 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openstack/nova-metadata-0"] Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.952725 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.958336 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-metadata-config-data" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.958617 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cert-nova-metadata-internal-svc" Jan 23 07:18:22 crc kubenswrapper[5102]: I0123 07:18:22.973838 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035071 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035132 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zssbh\" (UniqueName: \"kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035177 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035275 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gh9lp\" (UniqueName: \"kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035322 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035366 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035399 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.035423 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-data\" (UniqueName: 
\"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.040059 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.052403 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.054752 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zssbh\" (UniqueName: \"kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh\") pod \"nova-scheduler-0\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.136292 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gh9lp\" (UniqueName: \"kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.136649 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.136694 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.136713 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.136789 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.137244 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.140663 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.140762 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.141504 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.155991 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gh9lp\" (UniqueName: \"kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp\") pod \"nova-metadata-0\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " pod="openstack/nova-metadata-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.202237 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:18:23 crc kubenswrapper[5102]: I0123 07:18:23.272777 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:23.608294 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1f808f17-0e86-44de-a0d7-c326fe363e26" path="/var/lib/kubelet/pods/1f808f17-0e86-44de-a0d7-c326fe363e26/volumes" Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:23.609322 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5e8976c-47bd-46ed-8b02-8001332ed939" path="/var/lib/kubelet/pods/b5e8976c-47bd-46ed-8b02-8001332ed939/volumes" Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:23.700108 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:23.805587 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f","Type":"ContainerStarted","Data":"d12295e27bf9420ee70e17e6014cb366f71ab7c11103757de5d74a01df2eb366"} Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:24.512887 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:18:24 crc kubenswrapper[5102]: W0123 07:18:24.517405 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod47725711_7e88_4c25_8016_f70488231203.slice/crio-1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730 WatchSource:0}: Error finding container 1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730: Status 404 returned error can't find the container with id 1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730 Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:24.820481 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" 
event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerStarted","Data":"37d6e75df8ea73ff65995440840d50d5ede836b60edd1fd4be81f18b7fb96153"} Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:24.820818 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerStarted","Data":"1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730"} Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:24.823280 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f","Type":"ContainerStarted","Data":"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782"} Jan 23 07:18:24 crc kubenswrapper[5102]: I0123 07:18:24.846975 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-scheduler-0" podStartSLOduration=2.846950466 podStartE2EDuration="2.846950466s" podCreationTimestamp="2026-01-23 07:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:24.837853412 +0000 UTC m=+1455.658202387" watchObservedRunningTime="2026-01-23 07:18:24.846950466 +0000 UTC m=+1455.667299441" Jan 23 07:18:25 crc kubenswrapper[5102]: I0123 07:18:25.338228 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:25 crc kubenswrapper[5102]: I0123 07:18:25.338405 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:25 crc kubenswrapper[5102]: I0123 07:18:25.844958 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerStarted","Data":"4d7b6d603a00e934b80420f8abd17cabe70620c234e7bd375a48fc68ea87c3ac"} Jan 23 07:18:25 crc kubenswrapper[5102]: I0123 07:18:25.878893 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openstack/nova-metadata-0" podStartSLOduration=3.878870715 podStartE2EDuration="3.878870715s" podCreationTimestamp="2026-01-23 07:18:22 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:18:25.870998379 +0000 UTC m=+1456.691347354" watchObservedRunningTime="2026-01-23 07:18:25.878870715 +0000 UTC m=+1456.699219690" Jan 23 07:18:26 crc kubenswrapper[5102]: I0123 07:18:26.393305 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-csfzh" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="registry-server" probeResult="failure" output=< Jan 23 07:18:26 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 07:18:26 crc kubenswrapper[5102]: > Jan 23 07:18:28 crc kubenswrapper[5102]: I0123 07:18:28.203412 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-scheduler-0" Jan 23 07:18:28 crc kubenswrapper[5102]: I0123 07:18:28.271462 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:18:28 crc kubenswrapper[5102]: I0123 07:18:28.274243 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-metadata-0" Jan 23 07:18:30 crc kubenswrapper[5102]: I0123 07:18:30.084942 5102 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:18:30 crc kubenswrapper[5102]: I0123 07:18:30.085359 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-api-0" Jan 23 07:18:31 crc kubenswrapper[5102]: I0123 07:18:31.101796 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-api" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:18:31 crc kubenswrapper[5102]: I0123 07:18:31.101805 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-api-0" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-log" probeResult="failure" output="Get \"https://10.217.0.206:8774/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:18:33 crc kubenswrapper[5102]: I0123 07:18:33.203001 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-scheduler-0" Jan 23 07:18:33 crc kubenswrapper[5102]: I0123 07:18:33.257523 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-scheduler-0" Jan 23 07:18:33 crc kubenswrapper[5102]: I0123 07:18:33.271704 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 07:18:33 crc kubenswrapper[5102]: I0123 07:18:33.275096 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openstack/nova-metadata-0" Jan 23 07:18:33 crc kubenswrapper[5102]: I0123 07:18:33.992175 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-scheduler-0" Jan 23 07:18:34 crc kubenswrapper[5102]: I0123 07:18:34.284846 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:18:34 crc kubenswrapper[5102]: I0123 07:18:34.284871 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": net/http: request canceled (Client.Timeout exceeded while awaiting headers)" Jan 23 07:18:35 crc kubenswrapper[5102]: I0123 07:18:35.388908 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:35 crc kubenswrapper[5102]: I0123 07:18:35.439759 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:35 crc kubenswrapper[5102]: I0123 07:18:35.658140 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:36 crc kubenswrapper[5102]: I0123 07:18:36.981789 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-csfzh" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="registry-server" containerID="cri-o://18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c" gracePeriod=2 Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.580220 5102 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.685777 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pg8bc\" (UniqueName: \"kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc\") pod \"bee350bf-58af-4e41-ad0e-2beeefb75a04\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.686059 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities\") pod \"bee350bf-58af-4e41-ad0e-2beeefb75a04\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.686126 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content\") pod \"bee350bf-58af-4e41-ad0e-2beeefb75a04\" (UID: \"bee350bf-58af-4e41-ad0e-2beeefb75a04\") " Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.686582 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities" (OuterVolumeSpecName: "utilities") pod "bee350bf-58af-4e41-ad0e-2beeefb75a04" (UID: "bee350bf-58af-4e41-ad0e-2beeefb75a04"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.694776 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc" (OuterVolumeSpecName: "kube-api-access-pg8bc") pod "bee350bf-58af-4e41-ad0e-2beeefb75a04" (UID: "bee350bf-58af-4e41-ad0e-2beeefb75a04"). InnerVolumeSpecName "kube-api-access-pg8bc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.789812 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pg8bc\" (UniqueName: \"kubernetes.io/projected/bee350bf-58af-4e41-ad0e-2beeefb75a04-kube-api-access-pg8bc\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.789846 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.834839 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bee350bf-58af-4e41-ad0e-2beeefb75a04" (UID: "bee350bf-58af-4e41-ad0e-2beeefb75a04"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:18:37 crc kubenswrapper[5102]: I0123 07:18:37.892283 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bee350bf-58af-4e41-ad0e-2beeefb75a04-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.000125 5102 generic.go:334] "Generic (PLEG): container finished" podID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerID="18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c" exitCode=0 Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.000184 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerDied","Data":"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c"} Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.000214 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-csfzh" event={"ID":"bee350bf-58af-4e41-ad0e-2beeefb75a04","Type":"ContainerDied","Data":"798c65fa2c39d29095896e678ec3382da3752882484c7661f9d9304529e6f7dd"} Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.000232 5102 scope.go:117] "RemoveContainer" containerID="18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.000232 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-csfzh" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.040933 5102 scope.go:117] "RemoveContainer" containerID="e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.090920 5102 scope.go:117] "RemoveContainer" containerID="db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.100420 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.111338 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-csfzh"] Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.140346 5102 scope.go:117] "RemoveContainer" containerID="18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c" Jan 23 07:18:38 crc kubenswrapper[5102]: E0123 07:18:38.140983 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c\": container with ID starting with 18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c not found: ID does not exist" containerID="18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.141038 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c"} err="failed to get container status \"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c\": rpc error: code = NotFound desc = could not find container \"18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c\": container with ID starting with 18771484f9a80957470dc30ed2de34fd90e1a956bdca906fa96acad167393a5c not found: ID does not exist" Jan 23 07:18:38 crc 
kubenswrapper[5102]: I0123 07:18:38.141066 5102 scope.go:117] "RemoveContainer" containerID="e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546" Jan 23 07:18:38 crc kubenswrapper[5102]: E0123 07:18:38.141525 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546\": container with ID starting with e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546 not found: ID does not exist" containerID="e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.141594 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546"} err="failed to get container status \"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546\": rpc error: code = NotFound desc = could not find container \"e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546\": container with ID starting with e96ad2a71af8be8b08aec3decd5d3859950df7eb60947bedcfbaef7686dda546 not found: ID does not exist" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.141628 5102 scope.go:117] "RemoveContainer" containerID="db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0" Jan 23 07:18:38 crc kubenswrapper[5102]: E0123 07:18:38.141955 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0\": container with ID starting with db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0 not found: ID does not exist" containerID="db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0" Jan 23 07:18:38 crc kubenswrapper[5102]: I0123 07:18:38.141984 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0"} err="failed to get container status \"db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0\": rpc error: code = NotFound desc = could not find container \"db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0\": container with ID starting with db9f82b35433e3315e3b7626c0dab4a2f9e8f0e402d38e18e1e6790c34e6cbd0 not found: ID does not exist" Jan 23 07:18:39 crc kubenswrapper[5102]: I0123 07:18:39.613016 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" path="/var/lib/kubelet/pods/bee350bf-58af-4e41-ad0e-2beeefb75a04/volumes" Jan 23 07:18:40 crc kubenswrapper[5102]: I0123 07:18:40.095263 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 07:18:40 crc kubenswrapper[5102]: I0123 07:18:40.095958 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 07:18:40 crc kubenswrapper[5102]: I0123 07:18:40.096145 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-api-0" Jan 23 07:18:40 crc kubenswrapper[5102]: I0123 07:18:40.113192 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 07:18:41 crc kubenswrapper[5102]: I0123 07:18:41.035681 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openstack/nova-api-0" Jan 23 07:18:41 crc kubenswrapper[5102]: 
I0123 07:18:41.042347 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-api-0" Jan 23 07:18:42 crc kubenswrapper[5102]: I0123 07:18:42.014734 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/ceilometer-0" Jan 23 07:18:43 crc kubenswrapper[5102]: I0123 07:18:43.276853 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 07:18:43 crc kubenswrapper[5102]: I0123 07:18:43.284317 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openstack/nova-metadata-0" Jan 23 07:18:43 crc kubenswrapper[5102]: I0123 07:18:43.285408 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 23 07:18:44 crc kubenswrapper[5102]: I0123 07:18:44.082191 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openstack/nova-metadata-0" Jan 23 07:18:46 crc kubenswrapper[5102]: I0123 07:18:46.767849 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:18:46 crc kubenswrapper[5102]: I0123 07:18:46.768456 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:18:46 crc kubenswrapper[5102]: I0123 07:18:46.768513 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:18:46 crc kubenswrapper[5102]: I0123 07:18:46.769403 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:18:46 crc kubenswrapper[5102]: I0123 07:18:46.769485 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b" gracePeriod=600 Jan 23 07:18:47 crc kubenswrapper[5102]: I0123 07:18:47.105336 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b" exitCode=0 Jan 23 07:18:47 crc kubenswrapper[5102]: I0123 07:18:47.105373 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b"} Jan 23 07:18:47 crc kubenswrapper[5102]: I0123 07:18:47.105455 5102 scope.go:117] "RemoveContainer" containerID="a733638e06484172d6918735e8bb55956644c4519eef105a0e4f5d17b554c3be" Jan 23 07:18:48 crc 
kubenswrapper[5102]: I0123 07:18:48.115403 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458"} Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.120965 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstackclient"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.121588 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstackclient" podUID="6d355347-569d-4082-b9fd-66d286ef59be" containerName="openstackclient" containerID="cri-o://f754eb55679f47320849d8ad1be68524f1721681ec6135a0821f6efc11b8ffb1" gracePeriod=2 Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.139437 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstackclient"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.212852 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-kmst8"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.227434 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-kmst8"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.262803 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f350-account-create-update-576nm"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.282669 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.283089 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="extract-utilities" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283100 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="extract-utilities" Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.283113 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="extract-content" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283118 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="extract-content" Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.283134 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6d355347-569d-4082-b9fd-66d286ef59be" containerName="openstackclient" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283141 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6d355347-569d-4082-b9fd-66d286ef59be" containerName="openstackclient" Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.283159 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="registry-server" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283164 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="registry-server" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283326 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="bee350bf-58af-4e41-ad0e-2beeefb75a04" containerName="registry-server" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283351 5102 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="6d355347-569d-4082-b9fd-66d286ef59be" containerName="openstackclient" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.283995 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.290177 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-f350-account-create-update-576nm"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.290642 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.333437 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.375071 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.375264 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-metrics-hqzgg" podUID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" containerName="openstack-network-exporter" containerID="cri-o://f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f" gracePeriod=30 Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.392778 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.393031 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" containerID="cri-o://fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" gracePeriod=30 Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.393162 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-northd-0" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="openstack-network-exporter" containerID="cri-o://28fd2f580f926860b97dac693969e9dcc8ef486d9834e279e718727164266b75" gracePeriod=30 Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.409629 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.411585 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.416074 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.423119 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.470498 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v9nrp\" (UniqueName: \"kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.483859 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.509027 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.522362 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.546245 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.586940 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.587358 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.587495 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2wt8t\" (UniqueName: \"kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 
07:19:07.587782 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v9nrp\" (UniqueName: \"kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.587807 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.587826 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.614048 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.614200 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.632738 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v9nrp\" (UniqueName: \"kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp\") pod \"root-account-create-update-gzxp8\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.677699 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0326d61e-cade-48ab-87e9-7010d5f95ea8" path="/var/lib/kubelet/pods/0326d61e-cade-48ab-87e9-7010d5f95ea8/volumes" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.678874 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87ad8d4b-f678-4d24-add7-4af5cb947162" path="/var/lib/kubelet/pods/87ad8d4b-f678-4d24-add7-4af5cb947162/volumes" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.689351 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2wt8t\" (UniqueName: \"kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.689834 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.701939 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.721923 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.723077 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.723098 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.723110 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.723240 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.727105 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.738771 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.738815 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.740156 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.740434 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.743501 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.744645 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.746718 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2wt8t\" (UniqueName: \"kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t\") pod \"cinder-f350-account-create-update-h8rtr\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.770596 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-b741-account-create-update-vfjtx"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.795885 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-81e9-account-create-update-bjcgb"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.819745 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-81e9-account-create-update-bjcgb"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.840191 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-b741-account-create-update-vfjtx"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.855303 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f904-account-create-update-6s8dg"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.868224 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f904-account-create-update-6s8dg"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.899950 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901086 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901111 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901145 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2msc4\" (UniqueName: \"kubernetes.io/projected/3c23a8e6-4274-4c33-8f73-95b678f6509c-kube-api-access-2msc4\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901184 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901201 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msnc5\" (UniqueName: \"kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.901266 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2kwx5\" (UniqueName: \"kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.901989 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 23 07:19:07 crc kubenswrapper[5102]: E0123 07:19:07.902047 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data podName:f4fc3e1d-5fac-4696-a8eb-709db37b5ff6 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:08.402029459 +0000 UTC m=+1499.222378434 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data") pod "rabbitmq-server-0" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6") : configmap "rabbitmq-config-data" not found Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.916781 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.963731 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"] Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.965298 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-cj4r8" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.967738 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 23 07:19:07 crc kubenswrapper[5102]: I0123 07:19:07.995458 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"] Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002564 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002616 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002642 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2msc4\" (UniqueName: \"kubernetes.io/projected/3c23a8e6-4274-4c33-8f73-95b678f6509c-kube-api-access-2msc4\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002683 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002702 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msnc5\" (UniqueName: \"kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.002757 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2kwx5\" (UniqueName: \"kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.005111 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.006068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: 
\"kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.006943 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.046132 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.065047 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2kwx5\" (UniqueName: \"kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5\") pod \"placement-81e9-account-create-update-8xpx4\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.076639 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msnc5\" (UniqueName: \"kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5\") pod \"neutron-c788-account-create-update-8vnh5\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.081109 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2msc4\" (UniqueName: \"kubernetes.io/projected/3c23a8e6-4274-4c33-8f73-95b678f6509c-kube-api-access-2msc4\") pod \"barbican-f904-account-create-update-9pq8l\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.131317 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cww7s\" (UniqueName: \"kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.131457 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.178211 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"] Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.182505 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.182505 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.195297 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.207837 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-8xpx4"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.218779 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-sync-c2dfg"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.228450 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-sync-c2dfg"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.235446 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.235624 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cww7s\" (UniqueName: \"kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.236205 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.256043 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.256763 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-8vnh5"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.261986 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.287555 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.293378 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cww7s\" (UniqueName: \"kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s\") pod \"nova-api-8d62-account-create-update-cj4r8\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " pod="openstack/nova-api-8d62-account-create-update-cj4r8"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.297708 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.322202 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-hqzgg_df64b95b-fb03-49b3-b9e2-7d064e39c71b/openstack-network-exporter/0.log"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.322264 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-hqzgg"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.324643 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.328891 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f904-account-create-update-9pq8l"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.338985 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.339040 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.339119 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2cfgp\" (UniqueName: \"kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.344739 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v2g64\" (UniqueName: \"kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
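
The two util.go messages interleaved above come from kubelet's check of whether a pod sandbox must be (re)created: "No sandbox for pod can be found" (util.go:30) fires when the pod has no sandbox at all, while "No ready sandbox for pod can be found" (util.go:48) fires when a sandbox exists but is no longer ready, as with ovn-controller-metrics-hqzgg, whose exporter container has just died. A paraphrased sketch of that decision, simplified and not the verbatim kubelet source:

// sandboxNeedsRecreate paraphrases the two checks behind the log messages.
func sandboxNeedsRecreate(sandboxCount int, sandboxReady bool) (bool, string) {
	if sandboxCount == 0 {
		// logged from util.go:30 in this kubelet build
		return true, "No sandbox for pod can be found. Need to start a new one"
	}
	if !sandboxReady {
		// logged from util.go:48 in this kubelet build
		return true, "No ready sandbox for pod can be found. Need to start a new one"
	}
	return false, ""
}
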
Jan 23 07:19:08 crc kubenswrapper[5102]: E0123 07:19:08.358913 5102 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " execCommand=["/usr/share/ovn/scripts/ovn-ctl","stop_controller"] containerName="ovn-controller" pod="openstack/ovn-controller-rkvv7" message=<
Jan 23 07:19:08 crc kubenswrapper[5102]: Exiting ovn-controller (1) [ OK ]
Jan 23 07:19:08 crc kubenswrapper[5102]: >
Jan 23 07:19:08 crc kubenswrapper[5102]: E0123 07:19:08.358950 5102 kuberuntime_container.go:691] "PreStop hook failed" err="command '/usr/share/ovn/scripts/ovn-ctl stop_controller' exited with 137: " pod="openstack/ovn-controller-rkvv7" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" containerID="cri-o://6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.358989 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-rkvv7" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" containerID="cri-o://6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.362180 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-sync-gp42d"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.385026 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-sync-gp42d"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.401837 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-cj4r8"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.409596 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c788-account-create-update-rwhzz"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417170 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-metrics-hqzgg_df64b95b-fb03-49b3-b9e2-7d064e39c71b/openstack-network-exporter/0.log"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417215 5102 generic.go:334] "Generic (PLEG): container finished" podID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" containerID="f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f" exitCode=2
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417272 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hqzgg" event={"ID":"df64b95b-fb03-49b3-b9e2-7d064e39c71b","Type":"ContainerDied","Data":"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"}
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417298 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-metrics-hqzgg" event={"ID":"df64b95b-fb03-49b3-b9e2-7d064e39c71b","Type":"ContainerDied","Data":"f3550d5d54284bec1edb547d932f3d06b05a9be1ac8ac1d1a7b18b179a553b07"}
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417315 5102 scope.go:117] "RemoveContainer" containerID="f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"
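
Exit status 137 is 128+9, i.e. the PreStop process was SIGKILLed: ovn-ctl printed "Exiting ovn-controller (1) [ OK ]" but the exec session was torn down before the command could return cleanly, so kubelet records "PreStop hook failed" and still proceeds to kill the container with the pod's 30s grace period. A sketch of the lifecycle stanza implied by the execCommand in the log (the command comes from the log; the surrounding container definition is illustrative):

// ovnControllerContainer sketches the PreStop hook seen above.
func ovnControllerContainer() corev1.Container {
	return corev1.Container{
		Name:  "ovn-controller",
		Image: "", // image name is not shown in this log excerpt
		Lifecycle: &corev1.Lifecycle{
			PreStop: &corev1.LifecycleHandler{
				Exec: &corev1.ExecAction{
					// exactly the execCommand logged by handlers.go:78
					Command: []string{"/usr/share/ovn/scripts/ovn-ctl", "stop_controller"},
				},
			},
		},
	}
}
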
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.417439 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-metrics-hqzgg"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449213 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w29tf\" (UniqueName: \"kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449342 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449371 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449474 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449570 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-combined-ca-bundle\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.449659 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config\") pod \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\" (UID: \"df64b95b-fb03-49b3-b9e2-7d064e39c71b\") "
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.455757 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir" (OuterVolumeSpecName: "ovs-rundir") pod "df64b95b-fb03-49b3-b9e2-7d064e39c71b" (UID: "df64b95b-fb03-49b3-b9e2-7d064e39c71b"). InnerVolumeSpecName "ovs-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.464045 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf" (OuterVolumeSpecName: "kube-api-access-w29tf") pod "df64b95b-fb03-49b3-b9e2-7d064e39c71b" (UID: "df64b95b-fb03-49b3-b9e2-7d064e39c71b"). InnerVolumeSpecName "kube-api-access-w29tf". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.467846 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config" (OuterVolumeSpecName: "config") pod "df64b95b-fb03-49b3-b9e2-7d064e39c71b" (UID: "df64b95b-fb03-49b3-b9e2-7d064e39c71b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.468089 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "df64b95b-fb03-49b3-b9e2-7d064e39c71b" (UID: "df64b95b-fb03-49b3-b9e2-7d064e39c71b"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.470970 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.471048 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.471205 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2cfgp\" (UniqueName: \"kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.471500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v2g64\" (UniqueName: \"kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.472093 5102 reconciler_common.go:293] "Volume detached for volume \"ovs-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovs-rundir\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.472116 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/host-path/df64b95b-fb03-49b3-b9e2-7d064e39c71b-ovn-rundir\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.472126 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/df64b95b-fb03-49b3-b9e2-7d064e39c71b-config\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.472135 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w29tf\" (UniqueName: \"kubernetes.io/projected/df64b95b-fb03-49b3-b9e2-7d064e39c71b-kube-api-access-w29tf\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:08 crc kubenswrapper[5102]: E0123 07:19:08.475081 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
"{volumeName:kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data podName:f4fc3e1d-5fac-4696-a8eb-709db37b5ff6 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:09.475144077 +0000 UTC m=+1500.295493052 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data") pod "rabbitmq-server-0" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6") : configmap "rabbitmq-config-data" not found Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.475440 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.475819 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-sync-2kgk7"] Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.490371 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.527040 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2cfgp\" (UniqueName: \"kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp\") pod \"nova-cell1-b157-account-create-update-cxcd7\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " pod="openstack/nova-cell1-b157-account-create-update-cxcd7" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.538970 5102 generic.go:334] "Generic (PLEG): container finished" podID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerID="28fd2f580f926860b97dac693969e9dcc8ef486d9834e279e718727164266b75" exitCode=2 Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.539100 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerDied","Data":"28fd2f580f926860b97dac693969e9dcc8ef486d9834e279e718727164266b75"} Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.626922 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v2g64\" (UniqueName: \"kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64\") pod \"nova-cell0-f3ca-account-create-update-l5rzm\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.680473 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c788-account-create-update-rwhzz"] Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.683980 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.683980 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-cxcd7"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.726702 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-sync-2kgk7"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.745599 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.746137 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-server" containerID="cri-o://403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747210 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-updater" containerID="cri-o://8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747282 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="swift-recon-cron" containerID="cri-o://f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747324 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="rsync" containerID="cri-o://99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747359 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-expirer" containerID="cri-o://1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747387 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-updater" containerID="cri-o://cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747414 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-auditor" containerID="cri-o://6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747443 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-replicator" containerID="cri-o://67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747472 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-server" containerID="cri-o://0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747522 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-reaper" containerID="cri-o://fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747594 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-auditor" containerID="cri-o://860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747627 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-replicator" containerID="cri-o://e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747654 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-server" containerID="cri-o://a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747693 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-replicator" containerID="cri-o://9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.747729 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-storage-0" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-auditor" containerID="cri-o://166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93" gracePeriod=30
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.923925 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm"
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.925088 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8d62-account-create-update-7r467"]
Jan 23 07:19:08 crc kubenswrapper[5102]: I0123 07:19:08.975074 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-8d62-account-create-update-7r467"]
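
A single "SyncLoop DELETE" for swift-storage-0 fans out into one "Killing container with a grace period" line per container, all with the same gracePeriod=30: kubelet stops a pod's containers concurrently rather than one after another, so the whole pod still terminates within roughly one grace period. A simplified sketch of that fan-out (the real logic lives in kuberuntime; the stop callback here stands in for the CRI call and hook handling):

import (
	"context"
	"sync"
)

// killPodContainers stops every container of a pod in parallel, each with
// the same grace period, and waits for all of them to finish.
func killPodContainers(ctx context.Context, ids []string, grace int64,
	stop func(ctx context.Context, id string, grace int64) error) {
	var wg sync.WaitGroup
	for _, id := range ids {
		wg.Add(1)
		go func(id string) {
			defer wg.Done()
			_ = stop(ctx, id, grace) // each container gets the full grace period
		}(id)
	}
	wg.Wait()
}
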
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.035531 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.035893 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="dnsmasq-dns" containerID="cri-o://f4d5919866b49897b78006ffb1208e46c4edbce085f06eb3868680b3cfad3178" gracePeriod=10 Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.047072 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "df64b95b-fb03-49b3-b9e2-7d064e39c71b" (UID: "df64b95b-fb03-49b3-b9e2-7d064e39c71b"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.074595 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.098132 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.098156 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/df64b95b-fb03-49b3-b9e2-7d064e39c71b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.215160 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-wz8lf"] Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.233381 5102 handlers.go:78] "Exec lifecycle hook for Container in Pod failed" err=< Jan 23 07:19:09 crc kubenswrapper[5102]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 23 07:19:09 crc kubenswrapper[5102]: + source /usr/local/bin/container-scripts/functions Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNBridge=br-int Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNRemote=tcp:localhost:6642 Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNEncapType=geneve Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNAvailabilityZones= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ EnableChassisAsGateway=true Jan 23 07:19:09 crc kubenswrapper[5102]: ++ PhysicalNetworks= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNHostName= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 23 07:19:09 crc kubenswrapper[5102]: ++ ovs_dir=/var/lib/openvswitch Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 23 07:19:09 crc kubenswrapper[5102]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + cleanup_ovsdb_server_semaphore Jan 23 07:19:09 crc kubenswrapper[5102]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 23 07:19:09 crc kubenswrapper[5102]: > execCommand=["/usr/local/bin/container-scripts/stop-ovsdb-server.sh"] containerName="ovsdb-server" pod="openstack/ovn-controller-ovs-h9gtx" message=< Jan 23 07:19:09 crc kubenswrapper[5102]: Exiting ovsdb-server (5) [ OK ] Jan 23 07:19:09 crc kubenswrapper[5102]: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 23 07:19:09 crc kubenswrapper[5102]: + source /usr/local/bin/container-scripts/functions Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNBridge=br-int Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNRemote=tcp:localhost:6642 Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNEncapType=geneve Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNAvailabilityZones= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ EnableChassisAsGateway=true Jan 23 07:19:09 crc kubenswrapper[5102]: ++ PhysicalNetworks= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNHostName= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 23 07:19:09 crc kubenswrapper[5102]: ++ ovs_dir=/var/lib/openvswitch Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 23 07:19:09 crc kubenswrapper[5102]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + cleanup_ovsdb_server_semaphore Jan 23 07:19:09 crc kubenswrapper[5102]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 23 07:19:09 crc kubenswrapper[5102]: > Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.233421 5102 kuberuntime_container.go:691] "PreStop hook failed" err=< Jan 23 07:19:09 crc kubenswrapper[5102]: command '/usr/local/bin/container-scripts/stop-ovsdb-server.sh' exited with 137: ++ dirname /usr/local/bin/container-scripts/stop-ovsdb-server.sh Jan 23 07:19:09 crc kubenswrapper[5102]: + source /usr/local/bin/container-scripts/functions Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNBridge=br-int Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNRemote=tcp:localhost:6642 Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNEncapType=geneve Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNAvailabilityZones= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ EnableChassisAsGateway=true Jan 23 07:19:09 crc kubenswrapper[5102]: ++ PhysicalNetworks= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ OVNHostName= Jan 23 07:19:09 crc kubenswrapper[5102]: ++ DB_FILE=/etc/openvswitch/conf.db Jan 23 07:19:09 crc kubenswrapper[5102]: ++ ovs_dir=/var/lib/openvswitch Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_SCRIPT=/var/lib/openvswitch/flows-script Jan 23 07:19:09 crc kubenswrapper[5102]: ++ FLOWS_RESTORE_DIR=/var/lib/openvswitch/saved-flows Jan 23 07:19:09 crc kubenswrapper[5102]: ++ SAFE_TO_STOP_OVSDB_SERVER_SEMAPHORE=/var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + sleep 0.5 Jan 23 07:19:09 crc kubenswrapper[5102]: + '[' '!' 
-f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server ']' Jan 23 07:19:09 crc kubenswrapper[5102]: + cleanup_ovsdb_server_semaphore Jan 23 07:19:09 crc kubenswrapper[5102]: + rm -f /var/lib/openvswitch/is_safe_to_stop_ovsdb_server Jan 23 07:19:09 crc kubenswrapper[5102]: + /usr/share/openvswitch/scripts/ovs-ctl stop --no-ovs-vswitchd Jan 23 07:19:09 crc kubenswrapper[5102]: > pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" containerID="cri-o://89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.233462 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" containerID="cri-o://89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" gracePeriod=29 Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.235501 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" containerID="cri-o://e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" gracePeriod=29 Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.240208 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-wz8lf"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.272864 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-sync-62l6f"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.302307 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-sync-62l6f"] Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.319891 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.320004 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:09.819967071 +0000 UTC m=+1500.640316036 (durationBeforeRetry 500ms). 
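
The traced PreStop script polls for a semaphore file (is_safe_to_stop_ovsdb_server, created once it is safe to stop the database) in 0.5s steps, then removes it and runs "ovs-ctl stop --no-ovs-vswitchd"; here too the hook was SIGKILLed (137) at the deadline even though ovsdb-server had already exited "[ OK ]". Note that the follow-up kill uses gracePeriod=29: kubelet deducts the roughly one second the hook itself consumed from the original 30s budget. The same wait-then-stop pattern, re-expressed in Go for clarity (the actual hook is the shell script shown in the trace):

import (
	"os"
	"time"
)

// waitForSemaphoreThenStop blocks until the semaphore file exists, removes
// it, and then invokes the stop action (e.g. ovs-ctl stop --no-ovs-vswitchd).
func waitForSemaphoreThenStop(semaphore string, stop func() error) error {
	for {
		if _, err := os.Stat(semaphore); err == nil {
			break // another container signalled that state was saved
		}
		time.Sleep(500 * time.Millisecond) // the script's "sleep 0.5"
	}
	_ = os.Remove(semaphore) // cleanup_ovsdb_server_semaphore
	return stop()
}
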
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.320004 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:09.819967071 +0000 UTC m=+1500.640316036 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data") pod "rabbitmq-cell1-server-0" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5") : configmap "rabbitmq-cell1-config-data" not found
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.327731 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-5dwdj"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.347824 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-ring-rebalance-7ctb2"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.364775 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-ring-rebalance-7ctb2"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.392047 5102 scope.go:117] "RemoveContainer" containerID="f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.399498 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-5dwdj"]
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.415453 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f\": container with ID starting with f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f not found: ID does not exist" containerID="f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.415503 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f"} err="failed to get container status \"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f\": rpc error: code = NotFound desc = could not find container \"f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f\": container with ID starting with f7b166f98104ea4a76d49aa2ac43cefb71c999dcf9d55d27d63911c5ba3b8e6f not found: ID does not exist"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.447453 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-sync-d2mqk"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.485146 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-sync-d2mqk"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.529562 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"]
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.530223 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="openstack-network-exporter" containerID="cri-o://744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" gracePeriod=300
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.535497 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.535659 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data podName:f4fc3e1d-5fac-4696-a8eb-709db37b5ff6 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:11.535642295 +0000 UTC m=+1502.355991270 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data") pod "rabbitmq-server-0" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6") : configmap "rabbitmq-config-data" not found
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.740700 5102 generic.go:334] "Generic (PLEG): container finished" podID="6d355347-569d-4082-b9fd-66d286ef59be" containerID="f754eb55679f47320849d8ad1be68524f1721681ec6135a0821f6efc11b8ffb1" exitCode=137
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.749968 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.753180 5102 generic.go:334] "Generic (PLEG): container finished" podID="fb784258-3999-4323-8ef6-06631e94e61f" containerID="6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.762753 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-sb-0" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="ovsdbserver-sb" containerID="cri-o://15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" gracePeriod=300
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804267 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804317 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804325 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804331 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804339 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804345 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804351 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804377 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804383 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804390 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804400 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804408 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804414 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.804420 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.816199 5102 generic.go:334] "Generic (PLEG): container finished" podID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerID="f4d5919866b49897b78006ffb1208e46c4edbce085f06eb3868680b3cfad3178" exitCode=0
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.857580 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="19832d65-364c-4340-9109-57b179d8a14c" path="/var/lib/kubelet/pods/19832d65-364c-4340-9109-57b179d8a14c/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.858265 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
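
The generic.go:334 / kubelet.go:2453 pairs above are the PLEG (pod lifecycle event generator) pipeline: a periodic relist against the runtime notices containers that have exited, emits ContainerDied events, and the sync loop consumes them, which is why every swift-storage-0 container shows up twice, once as "container finished" and once as "SyncLoop (PLEG): event for pod". A much-reduced sketch of the relist diff (illustrative only; the real PLEG tracks richer state than a running/not-running map):

// PodLifecycleEvent mirrors the shape of the events printed by the sync loop.
type PodLifecycleEvent struct {
	PodID string // pod UID
	Type  string // e.g. "ContainerDied"
	Data  string // container ID
}

// diffRelists emits ContainerDied for containers seen running in the
// previous relist but absent (or stopped) in the current one.
func diffRelists(podID string, prev, curr map[string]bool, out chan<- PodLifecycleEvent) {
	for id, wasRunning := range prev {
		if wasRunning && !curr[id] {
			out <- PodLifecycleEvent{PodID: podID, Type: "ContainerDied", Data: id}
		}
	}
}
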
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.858348 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:10.858328691 +0000 UTC m=+1501.678677756 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data") pod "rabbitmq-cell1-server-0" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5") : configmap "rabbitmq-cell1-config-data" not found
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.889482 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-cell1-mariadb-root-db-secret"
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.897874 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 07:19:09 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: if [ -n "" ]; then
Jan 23 07:19:09 crc kubenswrapper[5102]: GRANT_DATABASE=""
Jan 23 07:19:09 crc kubenswrapper[5102]: else
Jan 23 07:19:09 crc kubenswrapper[5102]: GRANT_DATABASE="*"
Jan 23 07:19:09 crc kubenswrapper[5102]: fi
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: # going for maximum compatibility here:
Jan 23 07:19:09 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 23 07:19:09 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 23 07:19:09 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:09 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:09 crc kubenswrapper[5102]:
Jan 23 07:19:09 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.903152 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-gzxp8" podUID="278bc0a5-d40a-4983-b8bd-ae5b8e6af12d"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.913587 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1b3f08fd-2ae7-419b-8b88-637138d66302" path="/var/lib/kubelet/pods/1b3f08fd-2ae7-419b-8b88-637138d66302/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.917663 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="27b98baa-da03-46ae-8af3-ca99483f0007" path="/var/lib/kubelet/pods/27b98baa-da03-46ae-8af3-ca99483f0007/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.920148 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ec7cc04-6c30-49ca-91c8-99bb4200af09" path="/var/lib/kubelet/pods/2ec7cc04-6c30-49ca-91c8-99bb4200af09/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.931207 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="33b32693-d02a-42ef-b749-3e0b883b3227" path="/var/lib/kubelet/pods/33b32693-d02a-42ef-b749-3e0b883b3227/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.932239 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4806efa3-cb85-4d29-956d-63bf181c16be" path="/var/lib/kubelet/pods/4806efa3-cb85-4d29-956d-63bf181c16be/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.933518 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6b555d5d-9388-40a6-b4c5-7d0edd8c3e68" path="/var/lib/kubelet/pods/6b555d5d-9388-40a6-b4c5-7d0edd8c3e68/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.937049 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9080948-c87d-49da-b53e-b5228f44a2d4" path="/var/lib/kubelet/pods/a9080948-c87d-49da-b53e-b5228f44a2d4/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.937690 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ad1791e1-86ab-44e5-99e9-399e93cffc68" path="/var/lib/kubelet/pods/ad1791e1-86ab-44e5-99e9-399e93cffc68/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.938554 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d17cb94c-536a-4a89-aac5-802cc52ae2ce" path="/var/lib/kubelet/pods/d17cb94c-536a-4a89-aac5-802cc52ae2ce/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.941209 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc3a1422-92f2-45be-9e26-4768b42d9505" path="/var/lib/kubelet/pods/dc3a1422-92f2-45be-9e26-4768b42d9505/volumes"
Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.943691 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ee7792b2-73da-4fe7-b0c2-95ab1c382b51" path="/var/lib/kubelet/pods/ee7792b2-73da-4fe7-b0c2-95ab1c382b51/volumes"
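
The "Unhandled Error" dump and the pod_workers line after it show a CreateContainerConfigError: building the container's runtime config requires the secret openstack-cell1-mariadb-root-db-secret (consumed by mysql_root_auth.sh for DatabasePassword), the secret does not exist yet, so StartContainer fails before the script ever runs and the pod worker retries on the next sync. (The empty host in MYSQL_CMD and the empty "-n" test are how the operator's template rendered in this run, preserved verbatim above.) A sketch of the kind of env reference that triggers this error when the secret is absent (the secret name is from the log; the key name is hypothetical):

// rootDBPasswordEnv shows a secret-backed env var; if the named secret is
// missing, kubelet reports CreateContainerConfigError instead of starting
// the container.
func rootDBPasswordEnv() []corev1.EnvVar {
	return []corev1.EnvVar{{
		Name: "DatabasePassword",
		ValueFrom: &corev1.EnvVarSource{
			SecretKeyRef: &corev1.SecretKeySelector{
				LocalObjectReference: corev1.LocalObjectReference{Name: "openstack-cell1-mariadb-root-db-secret"},
				Key:                  "DbRootPassword", // hypothetical key name, not shown in the log
			},
		},
	}}
}
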
podUID="f9dedf34-d437-4ae1-ada2-46f4ad2b0320" path="/var/lib/kubelet/pods/f9dedf34-d437-4ae1-ada2-46f4ad2b0320/volumes" Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.945890 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.946166 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.951524 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerDied","Data":"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952241 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7" event={"ID":"fb784258-3999-4323-8ef6-06631e94e61f","Type":"ContainerDied","Data":"6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952356 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952531 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952633 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952691 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012"} Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.952192 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="openstack-network-exporter" containerID="cri-o://e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" gracePeriod=300 Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.946451 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="cinder-scheduler" containerID="cri-o://fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb" gracePeriod=30 Jan 23 07:19:09 crc kubenswrapper[5102]: I0123 07:19:09.949020 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-scheduler-0" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="probe" containerID="cri-o://d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44" gracePeriod=30 Jan 23 07:19:09 crc kubenswrapper[5102]: E0123 07:19:09.987867 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb784258_3999_4323_8ef6_06631e94e61f.slice/crio-conmon-6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-conmon-cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-conmon-67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podfb784258_3999_4323_8ef6_06631e94e61f.slice/crio-6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20474222_aadd_44c0_8c4e_f0b4bd0147c5.slice/crio-conmon-166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93.scope\": RecentStats: unable to find data in memory cache]" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:09.952867 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.020420 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.020505 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.020597 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.021344 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.021461 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" 
event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.021773 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.022167 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.023159 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.023381 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.023636 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.023737 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" event={"ID":"241a02f1-ca6d-4c3c-b635-2156947f47c4","Type":"ContainerDied","Data":"f4d5919866b49897b78006ffb1208e46c4edbce085f06eb3868680b3cfad3178"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.020966 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-httpd" containerID="cri-o://4f979b76f22ef2e8f8509c19caa21930b8a908a0e7b25aba0b15129e8e286021" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.020906 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-internal-api-0" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-log" containerID="cri-o://868d87cea06d5b9482b8147a33f18e2828a731b3e1fb46272675463a760abf4f" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.026526 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.026932 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5f55c94446-2fcrd" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-log" containerID="cri-o://04a75c697ea1aac00dffbc51b878b9c90262d7c394882f3e8e4fead3dde40397" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.027133 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/placement-5f55c94446-2fcrd" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-api" containerID="cri-o://d62cf3f61ec961d54c0543a7c6db6538a2fa229a7aa3236626738a9910298f8a" gracePeriod=30 
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.041501 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.043279 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api-log" containerID="cri-o://38ba8da046ac14dc360b77fb7112dee42133d1a68989da117321421af10dcea2" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.046261 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/cinder-api-0" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api" containerID="cri-o://a4b253153a5b0ae4b7304fc69166a78bdc78f9b33184fefd123a47d6a29e02a7" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.059341 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 23 07:19:10 crc kubenswrapper[5102]: W0123 07:19:10.069174 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod91ab8988_66ac_4643_b729_76d2575d0ad0.slice/crio-d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0 WatchSource:0}: Error finding container d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0: Status 404 returned error can't find the container with id d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.070157 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.075982 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-cell-mapping-7sv4q"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.084576 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-cell-mapping-7sv4q"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.085772 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"cinder-db-secret"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.094212 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "cinder" ]; then
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="cinder"
Jan 23 07:19:10 crc kubenswrapper[5102]: else
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*"
Jan 23 07:19:10 crc kubenswrapper[5102]: fi
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here:
Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.094643 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ovsdbserver-nb-0" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="ovsdbserver-nb" containerID="cri-o://eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" gracePeriod=300
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.095820 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-f350-account-create-update-h8rtr" podUID="91ab8988-66ac-4643-b729-76d2575d0ad0"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.107995 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.129246 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-cell-mapping-hzkhv"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.142589 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-cell-mapping-hzkhv"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170611 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170684 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config\") pod \"6d355347-569d-4082-b9fd-66d286ef59be\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170764 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle\") pod \"6d355347-569d-4082-b9fd-66d286ef59be\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170828 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cqqsd\" (UniqueName: \"kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170865 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret\") pod \"6d355347-569d-4082-b9fd-66d286ef59be\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170884 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170914 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170951 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.170989 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.171024 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrfwk\" (UniqueName: \"kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk\") pod \"6d355347-569d-4082-b9fd-66d286ef59be\" (UID: \"6d355347-569d-4082-b9fd-66d286ef59be\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.171067 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs\") pod \"fb784258-3999-4323-8ef6-06631e94e61f\" (UID: \"fb784258-3999-4323-8ef6-06631e94e61f\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.176512 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn" (OuterVolumeSpecName: "var-run-ovn") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "var-run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.177492 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn" (OuterVolumeSpecName: "var-log-ovn") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "var-log-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.177512 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run" (OuterVolumeSpecName: "var-run") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.180208 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts" (OuterVolumeSpecName: "scripts") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.193026 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk" (OuterVolumeSpecName: "kube-api-access-zrfwk") pod "6d355347-569d-4082-b9fd-66d286ef59be" (UID: "6d355347-569d-4082-b9fd-66d286ef59be"). InnerVolumeSpecName "kube-api-access-zrfwk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.215405 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd" (OuterVolumeSpecName: "kube-api-access-cqqsd") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "kube-api-access-cqqsd". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.216950 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e is running failed: container process not found" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.224249 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e is running failed: container process not found" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.226914 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e is running failed: container process not found" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" cmd=["/usr/bin/pidof","ovsdb-server"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.226948 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e is running failed: container process not found" probeType="Readiness" pod="openstack/ovsdbserver-nb-0" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="ovsdbserver-nb"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.229371 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.229674 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-log" containerID="cri-o://107c5f3d9db926ad82c1955c3fd0cba07ea73b8de197843699ad0edcedf0354b" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.230122 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/glance-default-external-api-0" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-httpd" containerID="cri-o://d16b0a4419002db2415cab085fc8a5390ea935e4fea5424b97b0f8ead9c68fef" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.239730 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-db-create-6wzvt"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.249070 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6d355347-569d-4082-b9fd-66d286ef59be" (UID: "6d355347-569d-4082-b9fd-66d286ef59be"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.249271 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-db-create-6wzvt"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.256876 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.272794 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.273450 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.273572 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.273891 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.274043 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.274258 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cpz7h\" (UniqueName: \"kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h\") pod \"241a02f1-ca6d-4c3c-b635-2156947f47c4\" (UID: \"241a02f1-ca6d-4c3c-b635-2156947f47c4\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.274750 5102 reconciler_common.go:293] "Volume detached for volume \"var-run-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run-ovn\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.275620 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-combined-ca-bundle\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.275751 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cqqsd\" (UniqueName: \"kubernetes.io/projected/fb784258-3999-4323-8ef6-06631e94e61f-kube-api-access-cqqsd\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.275863 5102 reconciler_common.go:293] "Volume detached for volume \"var-log-ovn\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-log-ovn\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.276151 5102 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/fb784258-3999-4323-8ef6-06631e94e61f-var-run\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.276239 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/fb784258-3999-4323-8ef6-06631e94e61f-scripts\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.276345 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrfwk\" (UniqueName: \"kubernetes.io/projected/6d355347-569d-4082-b9fd-66d286ef59be-kube-api-access-zrfwk\") on node \"crc\" DevicePath \"\""
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.277613 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-db-create-wlsm2"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.284729 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h" (OuterVolumeSpecName: "kube-api-access-cpz7h") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "kube-api-access-cpz7h". PluginName "kubernetes.io/projected", VolumeGidValue ""
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.288386 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-db-create-wlsm2"] Jan 23 07:19:10 crc kubenswrapper[5102]: W0123 07:19:10.291885 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podd06b6d0d_a628_42df_ac03_fe2e9a7c8b4e.slice/crio-ba715fdcc5b5ae6866d48a85f1480816466c3ccaa4fc783feabbfe979b6dc82d WatchSource:0}: Error finding container ba715fdcc5b5ae6866d48a85f1480816466c3ccaa4fc783feabbfe979b6dc82d: Status 404 returned error can't find the container with id ba715fdcc5b5ae6866d48a85f1480816466c3ccaa4fc783feabbfe979b6dc82d Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.297263 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret" (OuterVolumeSpecName: "openstack-config-secret") pod "6d355347-569d-4082-b9fd-66d286ef59be" (UID: "6d355347-569d-4082-b9fd-66d286ef59be"). InnerVolumeSpecName "openstack-config-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.297368 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-api-db-secret" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.298330 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"placement-db-secret" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.305240 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.305579 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "placement" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="placement" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.306748 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "nova_api" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="nova_api" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.308340 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"placement-db-secret\\\" not found\"" pod="openstack/placement-81e9-account-create-update-8xpx4" podUID="d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.309273 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-8d62-account-create-update-cj4r8" podUID="a8fe4d72-b09b-4158-b4ea-c59192dbc956" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.313164 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.326725 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-db-create-vhjgv"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.331389 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb" (OuterVolumeSpecName: "ovsdbserver-nb") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "ovsdbserver-nb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.356191 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0" (OuterVolumeSpecName: "dns-swift-storage-0") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "dns-swift-storage-0". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.360365 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config" (OuterVolumeSpecName: "config") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.362295 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-db-create-vhjgv"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.371821 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-db-create-2dcf9"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.374699 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs" (OuterVolumeSpecName: "ovn-controller-tls-certs") pod "fb784258-3999-4323-8ef6-06631e94e61f" (UID: "fb784258-3999-4323-8ef6-06631e94e61f"). InnerVolumeSpecName "ovn-controller-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.375613 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb" (OuterVolumeSpecName: "ovsdbserver-sb") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "ovsdbserver-sb". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.379274 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc" (OuterVolumeSpecName: "dns-svc") pod "241a02f1-ca6d-4c3c-b635-2156947f47c4" (UID: "241a02f1-ca6d-4c3c-b635-2156947f47c4"). InnerVolumeSpecName "dns-svc". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380659 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380680 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-controller-tls-certs\" (UniqueName: \"kubernetes.io/secret/fb784258-3999-4323-8ef6-06631e94e61f-ovn-controller-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380691 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cpz7h\" (UniqueName: \"kubernetes.io/projected/241a02f1-ca6d-4c3c-b635-2156947f47c4-kube-api-access-cpz7h\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380700 5102 reconciler_common.go:293] "Volume detached for volume \"dns-svc\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-svc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380709 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-nb\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380718 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-ovsdbserver-sb\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380728 5102 reconciler_common.go:293] "Volume detached for volume \"openstack-config\" (UniqueName: \"kubernetes.io/configmap/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380737 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380745 5102 reconciler_common.go:293] "Volume detached for volume \"dns-swift-storage-0\" (UniqueName: \"kubernetes.io/configmap/241a02f1-ca6d-4c3c-b635-2156947f47c4-dns-swift-storage-0\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380754 5102 reconciler_common.go:293] "Volume detached for volume \"openstack-config-secret\" (UniqueName: \"kubernetes.io/secret/6d355347-569d-4082-b9fd-66d286ef59be-openstack-config-secret\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.380788 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-db-create-2dcf9"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.389844 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.398629 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-795454f649-697pp"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.398887 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-795454f649-697pp" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-api" 
containerID="cri-o://a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.399028 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/neutron-795454f649-697pp" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-httpd" containerID="cri-o://7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.409703 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-db-create-z6f8v"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.417655 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-db-create-z6f8v"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.425477 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.431135 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.431436 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" containerID="cri-o://37d6e75df8ea73ff65995440840d50d5ede836b60edd1fd4be81f18b7fb96153" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.431597 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" containerID="cri-o://4d7b6d603a00e934b80420f8abd17cabe70620c234e7bd375a48fc68ea87c3ac" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.467806 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"neutron-db-secret" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.476065 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "neutron" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="neutron" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.479005 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"neutron-db-secret\\\" not found\"" pod="openstack/neutron-c788-account-create-update-8vnh5" podUID="9e48e0ca-0fe0-4be1-8909-11e2407daa7b" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.502307 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.528877 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.537381 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "barbican" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="barbican" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. 
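[Editor's note] Each mariadb-account-create-update failure above logs the container's full command, but the SQL heredoc after "$MYSQL_CMD <" is truncated in the captured log, so the actual statements are not recoverable from here. Going only by the script's own comments (no implicit user creation on GRANT in MySQL 8, no CREATE OR REPLACE USER in MySQL, hence CREATE, then ALTER, then GRANT), the heredoc plausibly resembles the following hypothetical sketch; DatabaseUser and the -h host are invented for illustration (the logged MYSQL_CMD has an empty -h value) and the operator's real SQL may differ:

    MYSQL_CMD="mysql -h mariadb.openstack.svc -u root -P 3306"   # hypothetical host
    GRANT_DATABASE="neutron"    # per service: cinder, placement, nova_api, ...
    $MYSQL_CMD <<EOF
    CREATE USER IF NOT EXISTS '${DatabaseUser}'@'%';
    ALTER USER '${DatabaseUser}'@'%' IDENTIFIED BY '${DatabasePassword}';
    GRANT ALL PRIVILEGES ON \`${GRANT_DATABASE}\`.* TO '${DatabaseUser}'@'%';
    EOF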
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.502307 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.528877 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"barbican-db-secret"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.537381 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "barbican" ]; then
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="barbican"
Jan 23 07:19:10 crc kubenswrapper[5102]: else
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*"
Jan 23 07:19:10 crc kubenswrapper[5102]: fi
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here:
Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.539903 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"barbican-db-secret\\\" not found\"" pod="openstack/barbican-f904-account-create-update-9pq8l" podUID="3c23a8e6-4274-4c33-8f73-95b678f6509c"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.649018 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.690397 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.690722 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-log" containerID="cri-o://c00e7ef9f103d8dba5d16c6b74a391b96b09bc3b06eaa21681a7b180182aaf05" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.691055 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-api-0" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-api" containerID="cri-o://f4d4bf4c2380f1096c71e372015b67fe76544c3993ebfdd80decba57527e35ae" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.729235 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.729595 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-scheduler-0" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerName="nova-scheduler-scheduler" containerID="cri-o://18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.743548 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell1-db-secret"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.745789 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.750804 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "nova_cell1" ]; then
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="nova_cell1"
Jan 23 07:19:10 crc kubenswrapper[5102]: else
Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*"
Jan 23 07:19:10 crc kubenswrapper[5102]: fi
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here:
Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:10 crc kubenswrapper[5102]:
Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.752129 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell1-db-secret\\\" not found\"" pod="openstack/nova-cell1-b157-account-create-update-cxcd7" podUID="76e95d75-3eab-44f8-9d54-2fe68c2fa4fb"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.775518 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-db-create-2dzhq"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.780582 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_2f7956cc-1c1c-410f-94f8-86feb62d9124/ovsdbserver-sb/0.log"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.780650 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.798738 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.817969 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-db-create-2dzhq"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.842159 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-server-0" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="rabbitmq" containerID="cri-o://5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec" gracePeriod=604800
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.842246 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.843716 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.851710 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-db-create-89jsr"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.855051 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.855104 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server"
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.857291 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.863312 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_4c43e79a-0827-4f25-a2b4-9b53ec46f96f/ovsdbserver-nb/0.log"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.863174 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-db-create-89jsr"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.863483 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-nb-0"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869159 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-rkvv7" event={"ID":"fb784258-3999-4323-8ef6-06631e94e61f","Type":"ContainerDied","Data":"8d0cdec117560c25624d9a9ccb470c3f38fe338c2b82e706e69159a8717a8b67"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869211 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-rkvv7"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869221 5102 scope.go:117] "RemoveContainer" containerID="6605e0593d6c81f526555e9341f952214d02c250a0f141024e9ede2c1ff5f2a9"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869399 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869592 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5f48c766d5-kqw8p" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker-log" containerID="cri-o://6070b8a73f605944bd3a15d8db62c51a5d73dffd518f94ec4f6ed403ec5ef669" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.869651 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-worker-5f48c766d5-kqw8p" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker" containerID="cri-o://d878afc3004c8f5f3e7c7a5b43603e184202681b56d93e9496c6bc1f56835ac3" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: W0123 07:19:10.874840 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod599ccc3d_2e89_48e4_9db2_394cfd4364dc.slice/crio-666f5073de7314f3c17bb19d850d645f48f2541be3ea14e2f62ed73fa62118b5 WatchSource:0}: Error finding container 666f5073de7314f3c17bb19d850d645f48f2541be3ea14e2f62ed73fa62118b5: Status 404 returned error can't find the container with id 666f5073de7314f3c17bb19d850d645f48f2541be3ea14e2f62ed73fa62118b5
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875525 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-sb-0_2f7956cc-1c1c-410f-94f8-86feb62d9124/ovsdbserver-sb/0.log"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875582 5102 generic.go:334] "Generic (PLEG): container finished" podID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerID="744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" exitCode=2
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875598 5102 generic.go:334] "Generic (PLEG): container finished" podID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerID="15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" exitCode=143
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875633 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerDied","Data":"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875657 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerDied","Data":"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875666 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-sb-0" event={"ID":"2f7956cc-1c1c-410f-94f8-86feb62d9124","Type":"ContainerDied","Data":"8dc48cca7d31f7dccd997c7fd879f5ba7f70557f7c5a18f7fe00dbef48252883"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.875710 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.888383 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.898460 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.898509 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.902207 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f350-account-create-update-h8rtr" event={"ID":"91ab8988-66ac-4643-b729-76d2575d0ad0","Type":"ContainerStarted","Data":"d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.913852 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b157-account-create-update-cxcd7" event={"ID":"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb","Type":"ContainerStarted","Data":"d16207586a11f57e60ef190a34a7f4e68325845ff84b1754e4edc150fbd40113"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.916280 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gzxp8" event={"ID":"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d","Type":"ContainerStarted","Data":"cd07c0b2036a38ddffd40b1c25ac187f97d30bf8e3d3b503357af3f840fd2bf3"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.917047 5102 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-gzxp8" secret="" err="secret \"galera-openstack-cell1-dockercfg-smfmv\" not found"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.918479 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.918754 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f86b8db9b-zlplv" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api-log" containerID="cri-o://34cd1354e2d8d1a8790a5b6bcf27425452892c21859624035093db53b7f4bf45" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.918874 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-api-f86b8db9b-zlplv" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api" containerID="cri-o://da77c21b9df506f687f044080259f2d216b0315a1410a1b0676e52084c699b33" gracePeriod=30
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.922270 5102 generic.go:334] "Generic (PLEG): container finished" podID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerID="7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31" exitCode=0
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.922433 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerDied","Data":"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31"}
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.932211 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"]
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.938518 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstackclient"
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941056 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941100 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k6n2h\" (UniqueName: \"kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941133 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941210 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941285 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndbcluster-sb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941361 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.941388 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.942096 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config\") pod \"2f7956cc-1c1c-410f-94f8-86feb62d9124\" (UID: \"2f7956cc-1c1c-410f-94f8-86feb62d9124\") "
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.942876 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found
Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.942931 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:12.942916318 +0000 UTC m=+1503.763265293 (durationBeforeRetry 2s).
Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data") pod "rabbitmq-cell1-server-0" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5") : configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.946241 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.946710 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-db-create-ppr5f"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.947194 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h" (OuterVolumeSpecName: "kube-api-access-k6n2h") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "kube-api-access-k6n2h". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.950620 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config" (OuterVolumeSpecName: "config") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.951223 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81e9-account-create-update-8xpx4" event={"ID":"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e","Type":"ContainerStarted","Data":"ba715fdcc5b5ae6866d48a85f1480816466c3ccaa4fc783feabbfe979b6dc82d"} Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.951864 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts" (OuterVolumeSpecName: "scripts") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.952809 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage02-crc" (OuterVolumeSpecName: "ovndbcluster-sb-etc-ovn") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "local-storage02-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.956283 5102 scope.go:117] "RemoveContainer" containerID="744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.957353 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "cinder" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="cinder" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.959673 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-db-create-ppr5f"] Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.960080 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. 
MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.960385 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"cinder-db-secret\\\" not found\"" pod="openstack/cinder-f350-account-create-update-h8rtr" podUID="91ab8988-66ac-4643-b729-76d2575d0ad0" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.961799 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"nova-cell0-db-secret" Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.966681 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-gzxp8" podUID="278bc0a5-d40a-4983-b8bd-ae5b8e6af12d" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.970022 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.970367 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:10 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: if [ -n "nova_cell0" ]; then Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="nova_cell0" Jan 23 07:19:10 crc kubenswrapper[5102]: else Jan 23 07:19:10 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:10 crc kubenswrapper[5102]: fi Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:10 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:10 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:10 crc kubenswrapper[5102]: # 3. 
create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:10 crc kubenswrapper[5102]: # support updates Jan 23 07:19:10 crc kubenswrapper[5102]: Jan 23 07:19:10 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.970404 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener" containerID="cri-o://7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.970514 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener-log" containerID="cri-o://0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577" gracePeriod=30 Jan 23 07:19:10 crc kubenswrapper[5102]: E0123 07:19:10.971769 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-cell0-db-secret\\\" not found\"" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" podUID="599ccc3d-2e89-48e4-9db2-394cfd4364dc" Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.982011 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vcbp6"] Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.987960 5102 generic.go:334] "Generic (PLEG): container finished" podID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerID="c00e7ef9f103d8dba5d16c6b74a391b96b09bc3b06eaa21681a7b180182aaf05" exitCode=143 Jan 23 07:19:10 crc kubenswrapper[5102]: I0123 07:19:10.988041 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerDied","Data":"c00e7ef9f103d8dba5d16c6b74a391b96b09bc3b06eaa21681a7b180182aaf05"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.004967 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-db-sync-vcbp6"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.010652 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.017002 5102 generic.go:334] "Generic (PLEG): container finished" podID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerID="107c5f3d9db926ad82c1955c3fd0cba07ea73b8de197843699ad0edcedf0354b" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.017073 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.017110 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerDied","Data":"107c5f3d9db926ad82c1955c3fd0cba07ea73b8de197843699ad0edcedf0354b"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.017264 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-conductor-0" podUID="6c65ea3f-14be-4130-b116-2291c114323e" containerName="nova-cell1-conductor-conductor" containerID="cri-o://d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.028908 5102 scope.go:117] "RemoveContainer" containerID="15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.036457 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qvfhk"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.042841 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.044622 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/dnsmasq-dns-fcd6f8f8f-9fbct" event={"ID":"241a02f1-ca6d-4c3c-b635-2156947f47c4","Type":"ContainerDied","Data":"0004288105d602bcb46a4b5f4689fd556ee0425c687d50be51cae3c640596ed8"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045229 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045322 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045349 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045381 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045431 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"ovndbcluster-nb-etc-ovn\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045463 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045490 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.045602 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wm4jz\" (UniqueName: \"kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz\") pod \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\" (UID: \"4c43e79a-0827-4f25-a2b4-9b53ec46f96f\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046061 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k6n2h\" (UniqueName: \"kubernetes.io/projected/2f7956cc-1c1c-410f-94f8-86feb62d9124-kube-api-access-k6n2h\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046072 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046081 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046098 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046108 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.046117 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2f7956cc-1c1c-410f-94f8-86feb62d9124-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.046867 5102 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.046917 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts podName:278bc0a5-d40a-4983-b8bd-ae5b8e6af12d nodeName:}" failed. No retries permitted until 2026-01-23 07:19:11.54689893 +0000 UTC m=+1502.367247895 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts") pod "root-account-create-update-gzxp8" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d") : configmap "openstack-cell1-scripts" not found Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.053310 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir" (OuterVolumeSpecName: "ovsdb-rundir") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "ovsdb-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.053945 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz" (OuterVolumeSpecName: "kube-api-access-wm4jz") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "kube-api-access-wm4jz". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.062957 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config" (OuterVolumeSpecName: "config") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.063196 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts" (OuterVolumeSpecName: "scripts") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.065967 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage04-crc" (OuterVolumeSpecName: "ovndbcluster-nb-etc-ovn") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "local-storage04-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.066816 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-cj4r8" event={"ID":"a8fe4d72-b09b-4158-b4ea-c59192dbc956","Type":"ContainerStarted","Data":"cac5f15c44990ea7a567157ea6b0f808108ecd6d4faf4c4f7486457dba2ece90"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.089574 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage02-crc" (UniqueName: "kubernetes.io/local-volume/local-storage02-crc") on node "crc" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.090085 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-db-sync-qvfhk"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.090411 5102 generic.go:334] "Generic (PLEG): container finished" podID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerID="868d87cea06d5b9482b8147a33f18e2828a731b3e1fb46272675463a760abf4f" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.090486 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerDied","Data":"868d87cea06d5b9482b8147a33f18e2828a731b3e1fb46272675463a760abf4f"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.112727 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-sb-tls-certs") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "ovsdbserver-sb-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.132797 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.134454 5102 scope.go:117] "RemoveContainer" containerID="744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.135253 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:11 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: if [ -n "nova_api" ]; then Jan 23 07:19:11 crc kubenswrapper[5102]: GRANT_DATABASE="nova_api" Jan 23 07:19:11 crc kubenswrapper[5102]: else Jan 23 07:19:11 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:11 crc kubenswrapper[5102]: fi Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:11 crc kubenswrapper[5102]: # 1. 
MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:11 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:11 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:11 crc kubenswrapper[5102]: # support updates Jan 23 07:19:11 crc kubenswrapper[5102]: Jan 23 07:19:11 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.136809 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"nova-api-db-secret\\\" not found\"" pod="openstack/nova-api-8d62-account-create-update-cj4r8" podUID="a8fe4d72-b09b-4158-b4ea-c59192dbc956" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.139356 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f904-account-create-update-9pq8l" event={"ID":"3c23a8e6-4274-4c33-8f73-95b678f6509c","Type":"ContainerStarted","Data":"2941bb83a9a001c682c291ae89831f946716e248ade9acbb7e3a2961c300391d"} Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.139798 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b\": container with ID starting with 744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b not found: ID does not exist" containerID="744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.139839 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b"} err="failed to get container status \"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b\": rpc error: code = NotFound desc = could not find container \"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b\": container with ID starting with 744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.139862 5102 scope.go:117] "RemoveContainer" containerID="15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.148443 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdb-rundir\" (UniqueName: \"kubernetes.io/empty-dir/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdb-rundir\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.148504 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage02-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage02-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.148525 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.150516 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.150530 5102 reconciler_common.go:293] "Volume detached for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.150570 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wm4jz\" (UniqueName: \"kubernetes.io/projected/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-kube-api-access-wm4jz\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.150583 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-sb-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-ovsdbserver-sb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.156876 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.157139 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell0-conductor-0" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerName="nova-cell0-conductor-conductor" containerID="cri-o://ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.180132 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.180384 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/nova-cell1-novncproxy-0" podUID="d9a35726-d2a8-4175-9398-2f49e4598f63" containerName="nova-cell1-novncproxy-novncproxy" containerID="cri-o://4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.195854 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c\": container with ID starting with 15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c not found: ID does not exist" containerID="15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.195914 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c"} err="failed to get container status \"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c\": rpc error: code = NotFound desc = could not find container \"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c\": container with ID starting with 15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.195939 5102 scope.go:117] "RemoveContainer" containerID="744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.196695 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b"} err="failed to get container status \"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b\": rpc error: code = NotFound desc = could not find container \"744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b\": container with ID starting with 744c7c75cfc2e2f2205b1ed760e46edcccdeabf9a656042c5399b643d1544c7b not 
found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.196737 5102 scope.go:117] "RemoveContainer" containerID="15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.199180 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage04-crc" (UniqueName: "kubernetes.io/local-volume/local-storage04-crc") on node "crc" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.200365 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "2f7956cc-1c1c-410f-94f8-86feb62d9124" (UID: "2f7956cc-1c1c-410f-94f8-86feb62d9124"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.206079 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c788-account-create-update-8vnh5" event={"ID":"9e48e0ca-0fe0-4be1-8909-11e2407daa7b","Type":"ContainerStarted","Data":"2d6485bfb81a579c1fc67fd524d7f28cce05cdd41cab7d3df1fd87abd694d89a"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.206122 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c"} err="failed to get container status \"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c\": rpc error: code = NotFound desc = could not find container \"15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c\": container with ID starting with 15135f03d8ea19ae331f5e1448f867607a4d305a5744d1b0b25c85cc4b619d6c not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.206325 5102 scope.go:117] "RemoveContainer" containerID="f754eb55679f47320849d8ad1be68524f1721681ec6135a0821f6efc11b8ffb1" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.206507 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.246732 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs" (OuterVolumeSpecName: "ovsdbserver-nb-tls-certs") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "ovsdbserver-nb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.254313 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/2f7956cc-1c1c-410f-94f8-86feb62d9124-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.254317 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-cell1-galera-0" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerName="galera" containerID="cri-o://4e4445ea0de06f9fa2ec0a6389c9a2952e55e6ae8854f80a494b563a6aab848e" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.254340 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.254374 5102 reconciler_common.go:293] "Volume detached for volume \"ovsdbserver-nb-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-ovsdbserver-nb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.254383 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage04-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage04-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.257608 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.264146 5102 generic.go:334] "Generic (PLEG): container finished" podID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerID="38ba8da046ac14dc360b77fb7112dee42133d1a68989da117321421af10dcea2" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.264220 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerDied","Data":"38ba8da046ac14dc360b77fb7112dee42133d1a68989da117321421af10dcea2"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.280366 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "4c43e79a-0827-4f25-a2b4-9b53ec46f96f" (UID: "4c43e79a-0827-4f25-a2b4-9b53ec46f96f"). InnerVolumeSpecName "metrics-certs-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.281127 5102 generic.go:334] "Generic (PLEG): container finished" podID="47725711-7e88-4c25-8016-f70488231203" containerID="37d6e75df8ea73ff65995440840d50d5ede836b60edd1fd4be81f18b7fb96153" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.281191 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerDied","Data":"37d6e75df8ea73ff65995440840d50d5ede836b60edd1fd4be81f18b7fb96153"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.287385 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.300754 5102 generic.go:334] "Generic (PLEG): container finished" podID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerID="04a75c697ea1aac00dffbc51b878b9c90262d7c394882f3e8e4fead3dde40397" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.300821 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerDied","Data":"04a75c697ea1aac00dffbc51b878b9c90262d7c394882f3e8e4fead3dde40397"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305647 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovsdbserver-nb-0_4c43e79a-0827-4f25-a2b4-9b53ec46f96f/ovsdbserver-nb/0.log" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305681 5102 generic.go:334] "Generic (PLEG): container finished" podID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerID="e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" exitCode=2 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305695 5102 generic.go:334] "Generic (PLEG): container finished" podID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" exitCode=143 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305720 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerDied","Data":"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305745 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerDied","Data":"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305755 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovsdbserver-nb-0" event={"ID":"4c43e79a-0827-4f25-a2b4-9b53ec46f96f","Type":"ContainerDied","Data":"b2882d618cb10293f64f9242e20d2b2d712b4391c1c3a826bf040e1ada6c3f21"} Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.305804 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovsdbserver-nb-0" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.342937 5102 scope.go:117] "RemoveContainer" containerID="f4d5919866b49897b78006ffb1208e46c4edbce085f06eb3868680b3cfad3178" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.343144 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.357744 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/4c43e79a-0827-4f25-a2b4-9b53ec46f96f-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.368748 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.373377 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/rabbitmq-cell1-server-0" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="rabbitmq" containerID="cri-o://33b3d6f15adbf2ba58af4031167e04bf38158518432643d8d72a903641549c7a" gracePeriod=604800 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.386094 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-metrics-hqzgg"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.418672 5102 scope.go:117] "RemoveContainer" containerID="06c0a7654873431b095e5493274efbb96eff7c0aed02ef4432705c549fc8af79" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.419821 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-cxcd7" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.426599 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.426870 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" containerID="cri-o://ff22da91cf223373d458a345f27d72a475d66d4886ff0e810b76172b7cd4967a" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.426996 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-server" containerID="cri-o://8c97a629f34f855d74c165a346be69cd13786a9b440301da9b3e18c2d09f3c5d" gracePeriod=30 Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.455781 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.469217 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.497917 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.505226 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.521357 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"] Jan 23 07:19:11 crc 
kubenswrapper[5102]: I0123 07:19:11.539458 5102 scope.go:117] "RemoveContainer" containerID="e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.548723 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.561393 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts\") pod \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.561459 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2cfgp\" (UniqueName: \"kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp\") pod \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\" (UID: \"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb\") " Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.561942 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.561989 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data podName:f4fc3e1d-5fac-4696-a8eb-709db37b5ff6 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:15.561974716 +0000 UTC m=+1506.382323691 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data") pod "rabbitmq-server-0" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6") : configmap "rabbitmq-config-data" not found Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.562037 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "76e95d75-3eab-44f8-9d54-2fe68c2fa4fb" (UID: "76e95d75-3eab-44f8-9d54-2fe68c2fa4fb"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.562155 5102 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.562206 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts podName:278bc0a5-d40a-4983-b8bd-ae5b8e6af12d nodeName:}" failed. No retries permitted until 2026-01-23 07:19:12.562189083 +0000 UTC m=+1503.382538048 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts") pod "root-account-create-update-gzxp8" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d") : configmap "openstack-cell1-scripts" not found Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.579859 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp" (OuterVolumeSpecName: "kube-api-access-2cfgp") pod "76e95d75-3eab-44f8-9d54-2fe68c2fa4fb" (UID: "76e95d75-3eab-44f8-9d54-2fe68c2fa4fb"). 
InnerVolumeSpecName "kube-api-access-2cfgp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.593584 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.597499 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-rkvv7"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.649118 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="073f9584-597d-4618-9e0d-4ca37ae233cf" path="/var/lib/kubelet/pods/073f9584-597d-4618-9e0d-4ca37ae233cf/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.649914 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="133dbc1b-39f3-41fa-9489-5cd5777f5865" path="/var/lib/kubelet/pods/133dbc1b-39f3-41fa-9489-5cd5777f5865/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.650494 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="13e397d3-faf3-41e3-b040-46b90a3e7c2c" path="/var/lib/kubelet/pods/13e397d3-faf3-41e3-b040-46b90a3e7c2c/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.653753 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="195d99d7-c8be-42f4-8f65-3209be1334b5" path="/var/lib/kubelet/pods/195d99d7-c8be-42f4-8f65-3209be1334b5/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.654568 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2dd81582-411c-483f-a6e0-08d3172ff873" path="/var/lib/kubelet/pods/2dd81582-411c-483f-a6e0-08d3172ff873/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.655161 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4d8c9762-bd4b-424c-943b-2b114c08211e" path="/var/lib/kubelet/pods/4d8c9762-bd4b-424c-943b-2b114c08211e/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.656201 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6d355347-569d-4082-b9fd-66d286ef59be" path="/var/lib/kubelet/pods/6d355347-569d-4082-b9fd-66d286ef59be/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.656733 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7f6ab118-4fb2-4b08-a015-08d76f3fcb38" path="/var/lib/kubelet/pods/7f6ab118-4fb2-4b08-a015-08d76f3fcb38/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.657237 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="88a152e3-a95a-4c00-a905-7b88c737c0fc" path="/var/lib/kubelet/pods/88a152e3-a95a-4c00-a905-7b88c737c0fc/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.658875 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8b567354-3e7f-446b-af77-f81ae5de44ce" path="/var/lib/kubelet/pods/8b567354-3e7f-446b-af77-f81ae5de44ce/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.659403 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b72030e8-a814-47ec-b0f2-edd8b146ff7a" path="/var/lib/kubelet/pods/b72030e8-a814-47ec-b0f2-edd8b146ff7a/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.659934 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c92486fb-6bab-4681-b42a-11b5631b265f" path="/var/lib/kubelet/pods/c92486fb-6bab-4681-b42a-11b5631b265f/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.660636 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" 
podUID="d2aadd3e-7281-49ae-88b5-611993646185" path="/var/lib/kubelet/pods/d2aadd3e-7281-49ae-88b5-611993646185/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.663233 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.663265 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2cfgp\" (UniqueName: \"kubernetes.io/projected/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb-kube-api-access-2cfgp\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.677272 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" path="/var/lib/kubelet/pods/df64b95b-fb03-49b3-b9e2-7d064e39c71b/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.678925 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fb784258-3999-4323-8ef6-06631e94e61f" path="/var/lib/kubelet/pods/fb784258-3999-4323-8ef6-06631e94e61f/volumes" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.684252 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.684279 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/dnsmasq-dns-fcd6f8f8f-9fbct"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.699508 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.723633 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-nb-0"] Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.821204 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.854232 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.857130 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.902134 5102 scope.go:117] "RemoveContainer" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.963737 5102 scope.go:117] "RemoveContainer" containerID="e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.964260 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039\": container with ID starting with e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039 not found: ID does not exist" containerID="e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.964282 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039"} err="failed to get container status \"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039\": rpc error: code = NotFound desc = could not find container \"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039\": container with ID starting with e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039 not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.964303 5102 scope.go:117] "RemoveContainer" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" Jan 23 07:19:11 crc kubenswrapper[5102]: E0123 07:19:11.965870 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e\": container with ID starting with eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e not found: ID does not exist" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.965887 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e"} err="failed to get container status \"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e\": rpc error: code = NotFound desc = could not find container \"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e\": container with ID starting with eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.965899 5102 scope.go:117] "RemoveContainer" containerID="e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.966328 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039"} err="failed to get container status \"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039\": rpc error: code = NotFound desc = could not find container \"e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039\": container with ID starting with e26ec7a40ea7e84af32f3efba9904fdfe49b4a1128882c6aa8617efb71ebb039 not found: ID does not exist" Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 
Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.966343 5102 scope.go:117] "RemoveContainer" containerID="eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e"
Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.966586 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e"} err="failed to get container status \"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e\": rpc error: code = NotFound desc = could not find container \"eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e\": container with ID starting with eaad28e2eb0e2c290e03e1274165fcedac1b5137a241b5e2fa59daeb3f2ab10e not found: ID does not exist"
Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.977359 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2msc4\" (UniqueName: \"kubernetes.io/projected/3c23a8e6-4274-4c33-8f73-95b678f6509c-kube-api-access-2msc4\") pod \"3c23a8e6-4274-4c33-8f73-95b678f6509c\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") "
Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.977462 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts\") pod \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") "
Jan 23 07:19:11 crc kubenswrapper[5102]: I0123 07:19:11.979923 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "9e48e0ca-0fe0-4be1-8909-11e2407daa7b" (UID: "9e48e0ca-0fe0-4be1-8909-11e2407daa7b"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue ""
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.993333 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msnc5\" (UniqueName: \"kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5\") pod \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\" (UID: \"9e48e0ca-0fe0-4be1-8909-11e2407daa7b\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.993378 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts\") pod \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.993465 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2kwx5\" (UniqueName: \"kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5\") pod \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\" (UID: \"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.993514 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts\") pod \"3c23a8e6-4274-4c33-8f73-95b678f6509c\" (UID: \"3c23a8e6-4274-4c33-8f73-95b678f6509c\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.994207 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2msc4\" (UniqueName: \"kubernetes.io/projected/3c23a8e6-4274-4c33-8f73-95b678f6509c-kube-api-access-2msc4\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.994226 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.994588 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "3c23a8e6-4274-4c33-8f73-95b678f6509c" (UID: "3c23a8e6-4274-4c33-8f73-95b678f6509c"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:11.995512 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e" (UID: "d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.004215 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5" (OuterVolumeSpecName: "kube-api-access-msnc5") pod "9e48e0ca-0fe0-4be1-8909-11e2407daa7b" (UID: "9e48e0ca-0fe0-4be1-8909-11e2407daa7b"). InnerVolumeSpecName "kube-api-access-msnc5". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.006449 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5" (OuterVolumeSpecName: "kube-api-access-2kwx5") pod "d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e" (UID: "d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e"). InnerVolumeSpecName "kube-api-access-2kwx5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.098118 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msnc5\" (UniqueName: \"kubernetes.io/projected/9e48e0ca-0fe0-4be1-8909-11e2407daa7b-kube-api-access-msnc5\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.098149 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.098158 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2kwx5\" (UniqueName: \"kubernetes.io/projected/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e-kube-api-access-2kwx5\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.098167 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/3c23a8e6-4274-4c33-8f73-95b678f6509c-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.103903 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.301404 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lqtnb\" (UniqueName: \"kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb\") pod \"d9a35726-d2a8-4175-9398-2f49e4598f63\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.301777 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data\") pod \"d9a35726-d2a8-4175-9398-2f49e4598f63\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.301824 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs\") pod \"d9a35726-d2a8-4175-9398-2f49e4598f63\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.301883 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-nova-novncproxy-tls-certs\") pod \"d9a35726-d2a8-4175-9398-2f49e4598f63\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.301917 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle\") pod 
\"d9a35726-d2a8-4175-9398-2f49e4598f63\" (UID: \"d9a35726-d2a8-4175-9398-2f49e4598f63\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.308474 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" probeResult="failure" output="Get \"https://10.217.0.179:8080/healthcheck\": dial tcp 10.217.0.179:8080: connect: connection refused" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.308848 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-server" probeResult="failure" output="Get \"https://10.217.0.179:8080/healthcheck\": dial tcp 10.217.0.179:8080: connect: connection refused" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.320781 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb" (OuterVolumeSpecName: "kube-api-access-lqtnb") pod "d9a35726-d2a8-4175-9398-2f49e4598f63" (UID: "d9a35726-d2a8-4175-9398-2f49e4598f63"). InnerVolumeSpecName "kube-api-access-lqtnb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.343461 5102 generic.go:334] "Generic (PLEG): container finished" podID="57f488ce-4b72-40f4-82d8-ad074776c306" containerID="0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577" exitCode=143 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.343532 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerDied","Data":"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344238 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/root-account-create-update-xrljw"] Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344718 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344736 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344751 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="init" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344758 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="init" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344769 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344775 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344783 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="ovsdbserver-nb" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344789 5102 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="ovsdbserver-nb" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344813 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344822 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344835 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d9a35726-d2a8-4175-9398-2f49e4598f63" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344845 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d9a35726-d2a8-4175-9398-2f49e4598f63" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344864 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="dnsmasq-dns" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344870 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="dnsmasq-dns" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344882 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344888 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.344901 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="ovsdbserver-sb" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.344907 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="ovsdbserver-sb" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345094 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="df64b95b-fb03-49b3-b9e2-7d064e39c71b" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345109 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" containerName="dnsmasq-dns" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345116 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="ovsdbserver-nb" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345125 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345133 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="ovsdbserver-sb" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345143 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d9a35726-d2a8-4175-9398-2f49e4598f63" containerName="nova-cell1-novncproxy-novncproxy" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345154 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="fb784258-3999-4323-8ef6-06631e94e61f" containerName="ovn-controller" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.345163 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" containerName="openstack-network-exporter" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.346033 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.346639 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-81e9-account-create-update-8xpx4" event={"ID":"d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e","Type":"ContainerDied","Data":"ba715fdcc5b5ae6866d48a85f1480816466c3ccaa4fc783feabbfe979b6dc82d"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.346706 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-81e9-account-create-update-8xpx4" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.348636 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"openstack-mariadb-root-db-secret" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.349641 5102 generic.go:334] "Generic (PLEG): container finished" podID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerID="34cd1354e2d8d1a8790a5b6bcf27425452892c21859624035093db53b7f4bf45" exitCode=143 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.349690 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerDied","Data":"34cd1354e2d8d1a8790a5b6bcf27425452892c21859624035093db53b7f4bf45"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.350717 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" event={"ID":"599ccc3d-2e89-48e4-9db2-394cfd4364dc","Type":"ContainerStarted","Data":"666f5073de7314f3c17bb19d850d645f48f2541be3ea14e2f62ed73fa62118b5"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.362140 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/barbican-f904-account-create-update-9pq8l" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.362353 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-f904-account-create-update-9pq8l" event={"ID":"3c23a8e6-4274-4c33-8f73-95b678f6509c","Type":"ContainerDied","Data":"2941bb83a9a001c682c291ae89831f946716e248ade9acbb7e3a2961c300391d"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.367635 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/root-account-create-update-xrljw"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.370836 5102 generic.go:334] "Generic (PLEG): container finished" podID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerID="6070b8a73f605944bd3a15d8db62c51a5d73dffd518f94ec4f6ed403ec5ef669" exitCode=143 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.370920 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerDied","Data":"6070b8a73f605944bd3a15d8db62c51a5d73dffd518f94ec4f6ed403ec5ef669"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.384720 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data" (OuterVolumeSpecName: "config-data") pod "d9a35726-d2a8-4175-9398-2f49e4598f63" (UID: "d9a35726-d2a8-4175-9398-2f49e4598f63"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.393509 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d9a35726-d2a8-4175-9398-2f49e4598f63" (UID: "d9a35726-d2a8-4175-9398-2f49e4598f63"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.403478 5102 generic.go:334] "Generic (PLEG): container finished" podID="d9a35726-d2a8-4175-9398-2f49e4598f63" containerID="4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99" exitCode=0 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.403605 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d9a35726-d2a8-4175-9398-2f49e4598f63","Type":"ContainerDied","Data":"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.403640 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-novncproxy-0" event={"ID":"d9a35726-d2a8-4175-9398-2f49e4598f63","Type":"ContainerDied","Data":"7da464590a5dadf7254d54fa366db75e0fa8cad58465651d54c77255d00fa00c"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.403658 5102 scope.go:117] "RemoveContainer" containerID="4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.403843 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-novncproxy-0" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.405837 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.405867 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.405883 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lqtnb\" (UniqueName: \"kubernetes.io/projected/d9a35726-d2a8-4175-9398-2f49e4598f63-kube-api-access-lqtnb\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.420295 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-c788-account-create-update-8vnh5" event={"ID":"9e48e0ca-0fe0-4be1-8909-11e2407daa7b","Type":"ContainerDied","Data":"2d6485bfb81a579c1fc67fd524d7f28cce05cdd41cab7d3df1fd87abd694d89a"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.420505 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/neutron-c788-account-create-update-8vnh5" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.427917 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerID="8c97a629f34f855d74c165a346be69cd13786a9b440301da9b3e18c2d09f3c5d" exitCode=0 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.427957 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerID="ff22da91cf223373d458a345f27d72a475d66d4886ff0e810b76172b7cd4967a" exitCode=0 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.428015 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerDied","Data":"8c97a629f34f855d74c165a346be69cd13786a9b440301da9b3e18c2d09f3c5d"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.428040 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerDied","Data":"ff22da91cf223373d458a345f27d72a475d66d4886ff0e810b76172b7cd4967a"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.437470 5102 generic.go:334] "Generic (PLEG): container finished" podID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerID="d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44" exitCode=0 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.437643 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerDied","Data":"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.461074 5102 generic.go:334] "Generic (PLEG): container finished" podID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerID="4e4445ea0de06f9fa2ec0a6389c9a2952e55e6ae8854f80a494b563a6aab848e" exitCode=0 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.461174 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" 
event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerDied","Data":"4e4445ea0de06f9fa2ec0a6389c9a2952e55e6ae8854f80a494b563a6aab848e"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.463551 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-b157-account-create-update-cxcd7" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.464582 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-b157-account-create-update-cxcd7" event={"ID":"76e95d75-3eab-44f8-9d54-2fe68c2fa4fb","Type":"ContainerDied","Data":"d16207586a11f57e60ef190a34a7f4e68325845ff84b1754e4edc150fbd40113"} Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.464888 5102 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." pod="openstack/root-account-create-update-gzxp8" secret="" err="secret \"galera-openstack-cell1-dockercfg-smfmv\" not found" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.479509 5102 scope.go:117] "RemoveContainer" containerID="4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.480347 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:12 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:12 crc kubenswrapper[5102]: Jan 23 07:19:12 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:12 crc kubenswrapper[5102]: Jan 23 07:19:12 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:12 crc kubenswrapper[5102]: Jan 23 07:19:12 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:12 crc kubenswrapper[5102]: Jan 23 07:19:12 crc kubenswrapper[5102]: if [ -n "" ]; then Jan 23 07:19:12 crc kubenswrapper[5102]: GRANT_DATABASE="" Jan 23 07:19:12 crc kubenswrapper[5102]: else Jan 23 07:19:12 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:12 crc kubenswrapper[5102]: fi Jan 23 07:19:12 crc kubenswrapper[5102]: Jan 23 07:19:12 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:12 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:12 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:12 crc kubenswrapper[5102]: # 3. 
Jan 23 07:19:12 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:12 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:12 crc kubenswrapper[5102]:
Jan 23 07:19:12 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.484725 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-cell1-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-gzxp8" podUID="278bc0a5-d40a-4983-b8bd-ae5b8e6af12d"
Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.496327 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99\": container with ID starting with 4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99 not found: ID does not exist" containerID="4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99"
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.496368 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99"} err="failed to get container status \"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99\": rpc error: code = NotFound desc = could not find container \"4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99\": container with ID starting with 4162bfd4f13e8c990c1fc6602dbff8428a65e1742a158cabaf48cb788dc72f99 not found: ID does not exist"
Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.496836 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"]
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.505420 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.510353 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5lq85\" (UniqueName: \"kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.510568 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.511446 5102 reconciler_common.go:293] "Volume detached for volume \"nova-novncproxy-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-nova-novncproxy-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.512231 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.518520 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" cmd=["/usr/local/bin/container-scripts/status_check.sh"] Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.518598 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-northd-0" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.542078 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs" (OuterVolumeSpecName: "vencrypt-tls-certs") pod "d9a35726-d2a8-4175-9398-2f49e4598f63" (UID: "d9a35726-d2a8-4175-9398-2f49e4598f63"). InnerVolumeSpecName "vencrypt-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.547036 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-81e9-account-create-update-8xpx4"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.613387 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5lq85\" (UniqueName: \"kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.613913 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.614683 5102 reconciler_common.go:293] "Volume detached for volume \"vencrypt-tls-certs\" (UniqueName: \"kubernetes.io/secret/d9a35726-d2a8-4175-9398-2f49e4598f63-vencrypt-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.615180 5102 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.615230 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts podName:278bc0a5-d40a-4983-b8bd-ae5b8e6af12d nodeName:}" failed. No retries permitted until 2026-01-23 07:19:14.615215968 +0000 UTC m=+1505.435564933 (durationBeforeRetry 2s). 
Jan 23 07:19:12 crc kubenswrapper[5102]: E0123 07:19:12.615230 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts podName:278bc0a5-d40a-4983-b8bd-ae5b8e6af12d nodeName:}" failed. No retries permitted until 2026-01-23 07:19:14.615215968 +0000 UTC m=+1505.435564933 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts") pod "root-account-create-update-gzxp8" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d") : configmap "openstack-cell1-scripts" not found
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.616594 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw"
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.621010 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"]
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.634599 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5lq85\" (UniqueName: \"kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85\") pod \"root-account-create-update-xrljw\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " pod="openstack/root-account-create-update-xrljw"
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.634726 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-f904-account-create-update-9pq8l"]
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.663169 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"]
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.671696 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-b157-account-create-update-cxcd7"]
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.694288 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"]
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.694451 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xrljw"
Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.703716 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-c788-account-create-update-8vnh5"]
Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.861573 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.862591 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-novncproxy-0"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.938951 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.938997 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tfrrm\" (UniqueName: \"kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939043 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939088 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939201 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939240 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939271 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.939288 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") pod \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\" (UID: \"0e1fd671-9192-4406-b7ea-3a33b4cdec57\") " Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.940077 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: 
"0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.940800 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.941443 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.941482 5102 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.942070 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "config-data-generated". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.944733 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.945558 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm" (OuterVolumeSpecName: "kube-api-access-tfrrm") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "kube-api-access-tfrrm". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.975417 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.982716 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.983049 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-central-agent" containerID="cri-o://3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc" gracePeriod=30 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.984798 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="proxy-httpd" containerID="cri-o://7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919" gracePeriod=30 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.984853 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="sg-core" containerID="cri-o://d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8" gracePeriod=30 Jan 23 07:19:12 crc kubenswrapper[5102]: I0123 07:19:12.984886 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/ceilometer-0" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-notification-agent" containerID="cri-o://440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222" gracePeriod=30 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:12.999919 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage12-crc" (OuterVolumeSpecName: "mysql-db") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "local-storage12-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.026817 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.027422 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/kube-state-metrics-0" podUID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" containerName="kube-state-metrics" containerID="cri-o://60739a227d7d472e20c6d49976fdefef7c5c808195298c9c502a74f3226d9f61" gracePeriod=30 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.027802 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.045303 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.045324 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.045333 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/0e1fd671-9192-4406-b7ea-3a33b4cdec57-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.045351 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.045361 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tfrrm\" (UniqueName: \"kubernetes.io/projected/0e1fd671-9192-4406-b7ea-3a33b4cdec57-kube-api-access-tfrrm\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.047789 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.047845 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:17.047828463 +0000 UTC m=+1507.868177438 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data") pod "rabbitmq-cell1-server-0" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5") : configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.103895 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.107707 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage12-crc" (UniqueName: "kubernetes.io/local-volume/local-storage12-crc") on node "crc" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.146029 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts\") pod \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.146128 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v2g64\" (UniqueName: \"kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64\") pod \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\" (UID: \"599ccc3d-2e89-48e4-9db2-394cfd4364dc\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.146738 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage12-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage12-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.151887 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "599ccc3d-2e89-48e4-9db2-394cfd4364dc" (UID: "599ccc3d-2e89-48e4-9db2-394cfd4364dc"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.157926 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "0e1fd671-9192-4406-b7ea-3a33b4cdec57" (UID: "0e1fd671-9192-4406-b7ea-3a33b4cdec57"). InnerVolumeSpecName "galera-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.157974 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64" (OuterVolumeSpecName: "kube-api-access-v2g64") pod "599ccc3d-2e89-48e4-9db2-394cfd4364dc" (UID: "599ccc3d-2e89-48e4-9db2-394cfd4364dc"). InnerVolumeSpecName "kube-api-access-v2g64". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.203980 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 is running failed: container process not found" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.204370 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 is running failed: container process not found" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.204614 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 is running failed: container process not found" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" cmd=["/usr/bin/pgrep","-r","DRST","nova-scheduler"] Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.204658 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 is running failed: container process not found" probeType="Readiness" pod="openstack/nova-scheduler-0" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerName="nova-scheduler-scheduler" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247612 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247702 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247728 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247774 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247838 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs\") 
pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247857 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247914 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.247961 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-k2z5f\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.248370 5102 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/0e1fd671-9192-4406-b7ea-3a33b4cdec57-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.248382 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/599ccc3d-2e89-48e4-9db2-394cfd4364dc-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.248392 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v2g64\" (UniqueName: \"kubernetes.io/projected/599ccc3d-2e89-48e4-9db2-394cfd4364dc-kube-api-access-v2g64\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.249095 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.251799 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "run-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.256832 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f" (OuterVolumeSpecName: "kube-api-access-k2z5f") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "kube-api-access-k2z5f". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.278998 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "etc-swift". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.368316 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.368345 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.368354 5102 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.368364 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-k2z5f\" (UniqueName: \"kubernetes.io/projected/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-kube-api-access-k2z5f\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.402875 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.403100 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/memcached-0" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" containerName="memcached" containerID="cri-o://88612d72f56f1267bf785aa1c9978b748c76a6d93eedc7a0d71e8ed0f2faec38" gracePeriod=30 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.418426 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.457821 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8ed5-account-create-update-rnftj"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.484779 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-8ed5-account-create-update-rnftj"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.492637 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.499532 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/cinder-api-0" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api" probeResult="failure" output="Get \"https://10.217.0.169:8776/healthcheck\": read tcp 10.217.0.2:44880->10.217.0.169:8776: read: connection reset by peer" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.508117 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-cj4r8" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.519140 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.520718 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openstack/keystone-8ed5-account-create-update-bdjvv"] Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.521092 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-server" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.521109 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-server" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.521136 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerName="galera" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.521143 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerName="galera" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.521156 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.521162 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.521178 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerName="nova-scheduler-scheduler" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.521184 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerName="nova-scheduler-scheduler" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.521202 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerName="mysql-bootstrap" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.521209 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" 
containerName="mysql-bootstrap" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.524250 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerName="nova-scheduler-scheduler" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.524277 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-httpd" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.524295 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" containerName="galera" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.524310 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" containerName="proxy-server" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.525318 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.528970 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openstack"/"keystone-db-secret" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.533004 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openstack/keystone-8ed5-account-create-update-bdjvv"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.535867 5102 generic.go:334] "Generic (PLEG): container finished" podID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerID="d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8" exitCode=2 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.535922 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerDied","Data":"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8"} Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.538883 5102 generic.go:334] "Generic (PLEG): container finished" podID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerID="d62cf3f61ec961d54c0543a7c6db6538a2fa229a7aa3236626738a9910298f8a" exitCode=0 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.538923 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerDied","Data":"d62cf3f61ec961d54c0543a7c6db6538a2fa229a7aa3236626738a9910298f8a"} Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.539579 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-bootstrap-zqc5s"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.540367 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-8d62-account-create-update-cj4r8" event={"ID":"a8fe4d72-b09b-4158-b4ea-c59192dbc956","Type":"ContainerDied","Data":"cac5f15c44990ea7a567157ea6b0f808108ecd6d4faf4c4f7486457dba2ece90"} Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.540420 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-api-8d62-account-create-update-cj4r8" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.545704 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-bootstrap-zqc5s"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.560632 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-sync-k7zdm"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.571182 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.571961 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/keystone-756757b6f5-klql8" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerName="keystone-api" containerID="cri-o://32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f" gracePeriod=30 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.583677 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-sync-k7zdm"] Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.591037 5102 generic.go:334] "Generic (PLEG): container finished" podID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" exitCode=0 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.591166 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f","Type":"ContainerDied","Data":"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782"} Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.591201 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-scheduler-0" event={"ID":"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f","Type":"ContainerDied","Data":"d12295e27bf9420ee70e17e6014cb366f71ab7c11103757de5d74a01df2eb366"} Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.591229 5102 scope.go:117] "RemoveContainer" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.591449 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-scheduler-0" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.604719 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cww7s\" (UniqueName: \"kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s\") pod \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.604785 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zssbh\" (UniqueName: \"kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh\") pod \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.604887 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle\") pod \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.604990 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data\") pod \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\" (UID: \"85d1a521-2a0e-4bfa-adf9-5f7ab24d936f\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.605072 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts\") pod \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\" (UID: \"a8fe4d72-b09b-4158-b4ea-c59192dbc956\") " Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.607073 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.607231 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4qgzn\" (UniqueName: \"kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.608947 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "a8fe4d72-b09b-4158-b4ea-c59192dbc956" (UID: "a8fe4d72-b09b-4158-b4ea-c59192dbc956"). InnerVolumeSpecName "operator-scripts". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.612577 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="241a02f1-ca6d-4c3c-b635-2156947f47c4" path="/var/lib/kubelet/pods/241a02f1-ca6d-4c3c-b635-2156947f47c4/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.613681 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3873fe98-794e-42f7-9b3a-b4d9e8ab64f7" path="/var/lib/kubelet/pods/3873fe98-794e-42f7-9b3a-b4d9e8ab64f7/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.616082 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3c23a8e6-4274-4c33-8f73-95b678f6509c" path="/var/lib/kubelet/pods/3c23a8e6-4274-4c33-8f73-95b678f6509c/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.617312 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c43e79a-0827-4f25-a2b4-9b53ec46f96f" path="/var/lib/kubelet/pods/4c43e79a-0827-4f25-a2b4-9b53ec46f96f/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.618003 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data" (OuterVolumeSpecName: "config-data") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.618362 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5dcb8a22-2a42-4baf-8b27-7041f960563c" path="/var/lib/kubelet/pods/5dcb8a22-2a42-4baf-8b27-7041f960563c/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.619271 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76e95d75-3eab-44f8-9d54-2fe68c2fa4fb" path="/var/lib/kubelet/pods/76e95d75-3eab-44f8-9d54-2fe68c2fa4fb/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.619958 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e48e0ca-0fe0-4be1-8909-11e2407daa7b" path="/var/lib/kubelet/pods/9e48e0ca-0fe0-4be1-8909-11e2407daa7b/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.620650 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b5890481-2315-483e-868e-6145bffd53c3" path="/var/lib/kubelet/pods/b5890481-2315-483e-868e-6145bffd53c3/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.621619 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "public-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.623700 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e" path="/var/lib/kubelet/pods/d06b6d0d-a628-42df-ac03-fe2e9a7c8b4e/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.624569 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d9a35726-d2a8-4175-9398-2f49e4598f63" path="/var/lib/kubelet/pods/d9a35726-d2a8-4175-9398-2f49e4598f63/volumes" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.648205 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s" (OuterVolumeSpecName: "kube-api-access-cww7s") pod "a8fe4d72-b09b-4158-b4ea-c59192dbc956" (UID: "a8fe4d72-b09b-4158-b4ea-c59192dbc956"). InnerVolumeSpecName "kube-api-access-cww7s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.651584 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.653775 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh" (OuterVolumeSpecName: "kube-api-access-zssbh") pod "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" (UID: "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f"). InnerVolumeSpecName "kube-api-access-zssbh". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.669647 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-cell1-galera-0" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.672135 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" (UID: "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.692204 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.702487 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data" (OuterVolumeSpecName: "config-data") pod "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" (UID: "85d1a521-2a0e-4bfa-adf9-5f7ab24d936f"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.703699 5102 generic.go:334] "Generic (PLEG): container finished" podID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" containerID="60739a227d7d472e20c6d49976fdefef7c5c808195298c9c502a74f3226d9f61" exitCode=2 Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.710714 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.710963 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") pod \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\" (UID: \"ac268af7-b49d-40bf-97c8-7abc5ff2bdad\") " Jan 23 07:19:13 crc kubenswrapper[5102]: W0123 07:19:13.711304 5102 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/ac268af7-b49d-40bf-97c8-7abc5ff2bdad/volumes/kubernetes.io~secret/combined-ca-bundle Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.711321 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ac268af7-b49d-40bf-97c8-7abc5ff2bdad" (UID: "ac268af7-b49d-40bf-97c8-7abc5ff2bdad"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712110 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712163 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qgzn\" (UniqueName: \"kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.712470 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.712519 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:14.212506623 +0000 UTC m=+1505.032855598 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : configmap "openstack-scripts" not found Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712744 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712887 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/a8fe4d72-b09b-4158-b4ea-c59192dbc956-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712898 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712910 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cww7s\" (UniqueName: \"kubernetes.io/projected/a8fe4d72-b09b-4158-b4ea-c59192dbc956-kube-api-access-cww7s\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712921 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zssbh\" (UniqueName: \"kubernetes.io/projected/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-kube-api-access-zssbh\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712929 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712937 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.712950 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/ac268af7-b49d-40bf-97c8-7abc5ff2bdad-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.736252 5102 projected.go:194] Error preparing data for projected volume kube-api-access-4qgzn for pod openstack/keystone-8ed5-account-create-update-bdjvv: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.736309 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:14.236292932 +0000 UTC m=+1505.056641907 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-4qgzn" (UniqueName: "kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : failed to fetch token: serviceaccounts "galera-openstack" not found
Jan 23 07:19:13 crc kubenswrapper[5102]: W0123 07:19:13.756185 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf6b9e803_e21c_4609_ab6d_8e54e7cbe816.slice/crio-e9a202e6c15d394df95bd3e24d3feabeefd0ae036baeced331e8bd73dc6b64fc WatchSource:0}: Error finding container e9a202e6c15d394df95bd3e24d3feabeefd0ae036baeced331e8bd73dc6b64fc: Status 404 returned error can't find the container with id e9a202e6c15d394df95bd3e24d3feabeefd0ae036baeced331e8bd73dc6b64fc
Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.788278 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=<
Jan 23 07:19:13 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."}
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306"
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: if [ -n "" ]; then
Jan 23 07:19:13 crc kubenswrapper[5102]: GRANT_DATABASE=""
Jan 23 07:19:13 crc kubenswrapper[5102]: else
Jan 23 07:19:13 crc kubenswrapper[5102]: GRANT_DATABASE="*"
Jan 23 07:19:13 crc kubenswrapper[5102]: fi
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: # going for maximum compatibility here:
Jan 23 07:19:13 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used
Jan 23 07:19:13 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not
Jan 23 07:19:13 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to
Jan 23 07:19:13 crc kubenswrapper[5102]: # support updates
Jan 23 07:19:13 crc kubenswrapper[5102]:
Jan 23 07:19:13 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError"
Jan 23 07:19:13 crc kubenswrapper[5102]: E0123 07:19:13.789617 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-xrljw" podUID="f6b9e803-e21c-4609-ab6d-8e54e7cbe816"
Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.941204 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": read tcp 10.217.0.2:37524->10.217.0.208:8775: read: connection reset by peer"
Jan 23 07:19:13 crc kubenswrapper[5102]: I0123 07:19:13.941244 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/nova-metadata-0" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" probeResult="failure" output="Get \"https://10.217.0.208:8775/\": read tcp 10.217.0.2:37536->10.217.0.208:8775: read: connection reset by peer"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.170814 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171021 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-proxy-845d4fc79c-bhsj4" event={"ID":"ac268af7-b49d-40bf-97c8-7abc5ff2bdad","Type":"ContainerDied","Data":"454146c5127d8cddc2ec09c2420088d3298b0f1ed15431d378cd0a37c9c6c4ff"}
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171048 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8ed5-account-create-update-bdjvv"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171062 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-db-create-2t4g9"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171073 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-db-create-2t4g9"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171087 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-cell1-galera-0" event={"ID":"0e1fd671-9192-4406-b7ea-3a33b4cdec57","Type":"ContainerDied","Data":"214cb08a2f4840bce6138becd379b9e3e1c2130ae49f9a8af25bd667c4e1d9eb"}
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171100 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-f3ca-account-create-update-l5rzm" event={"ID":"599ccc3d-2e89-48e4-9db2-394cfd4364dc","Type":"ContainerDied","Data":"666f5073de7314f3c17bb19d850d645f48f2541be3ea14e2f62ed73fa62118b5"}
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171111 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-xrljw"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171123 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ddb8da53-e17d-4c8d-a625-0d241d2caafd","Type":"ContainerDied","Data":"60739a227d7d472e20c6d49976fdefef7c5c808195298c9c502a74f3226d9f61"}
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.171144 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-xrljw"]
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.225703 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv"
Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.226230 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found
Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.226296 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:15.22627609 +0000 UTC m=+1506.046625065 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : configmap "openstack-scripts" not found
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.267468 5102 scope.go:117] "RemoveContainer" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782"
Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.268331 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782\": container with ID starting with 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 not found: ID does not exist" containerID="18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.268366 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782"} err="failed to get container status \"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782\": rpc error: code = NotFound desc = could not find container \"18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782\": container with ID starting with 18f93663ca673356b90ae25d6de5d9c109b480d4d4dcbcebbcd42e1e6d68e782 not found: ID does not exist"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.268388 5102 scope.go:117] "RemoveContainer" containerID="8c97a629f34f855d74c165a346be69cd13786a9b440301da9b3e18c2d09f3c5d"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.326811 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f86b8db9b-zlplv" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:53832->10.217.0.164:9311: read: connection reset by peer"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.327172 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/barbican-api-f86b8db9b-zlplv" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api-log" probeResult="failure" output="Get \"https://10.217.0.164:9311/healthcheck\": read tcp 10.217.0.2:53816->10.217.0.164:9311: read: connection reset by peer"
Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.328568 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qgzn\" (UniqueName: \"kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.340049 5102 projected.go:194] Error preparing data for projected volume kube-api-access-4qgzn for pod openstack/keystone-8ed5-account-create-update-bdjvv: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.340114 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:15.340098125 +0000 UTC m=+1506.160447100 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "kube-api-access-4qgzn" (UniqueName: "kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.357532 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.367180 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.386287 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.386374 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell1-conductor-0" podUID="6c65ea3f-14be-4130-b116-2291c114323e" containerName="nova-cell1-conductor-conductor" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.402354 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/neutron-795454f649-697pp" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-httpd" probeResult="failure" output="Get \"https://10.217.0.170:9696/\": dial tcp 10.217.0.170:9696: connect: connection refused" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.575000 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openstack/openstack-galera-0" 
podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="galera" containerID="cri-o://80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8" gracePeriod=30 Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.634516 5102 configmap.go:193] Couldn't get configMap openstack/openstack-cell1-scripts: configmap "openstack-cell1-scripts" not found Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.634610 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts podName:278bc0a5-d40a-4983-b8bd-ae5b8e6af12d nodeName:}" failed. No retries permitted until 2026-01-23 07:19:18.634593661 +0000 UTC m=+1509.454942636 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts") pod "root-account-create-update-gzxp8" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d") : configmap "openstack-cell1-scripts" not found Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.740848 5102 generic.go:334] "Generic (PLEG): container finished" podID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerID="a4b253153a5b0ae4b7304fc69166a78bdc78f9b33184fefd123a47d6a29e02a7" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.741257 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerDied","Data":"a4b253153a5b0ae4b7304fc69166a78bdc78f9b33184fefd123a47d6a29e02a7"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.741289 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-api-0" event={"ID":"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8","Type":"ContainerDied","Data":"35ad746a4ac3dd7283b65af8937ae4c8fe6e3a642b36302c9198f96bfaae34cb"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.741303 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="35ad746a4ac3dd7283b65af8937ae4c8fe6e3a642b36302c9198f96bfaae34cb" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.743431 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xrljw" event={"ID":"f6b9e803-e21c-4609-ab6d-8e54e7cbe816","Type":"ContainerStarted","Data":"e9a202e6c15d394df95bd3e24d3feabeefd0ae036baeced331e8bd73dc6b64fc"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.744445 5102 kubelet_pods.go:1007] "Unable to retrieve pull secret, the image pull may not succeed." 
pod="openstack/root-account-create-update-xrljw" secret="" err="secret \"galera-openstack-dockercfg-jwnc4\" not found" Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.747107 5102 kuberuntime_manager.go:1274] "Unhandled Error" err=< Jan 23 07:19:14 crc kubenswrapper[5102]: container &Container{Name:mariadb-account-create-update,Image:quay.io/podified-antelope-centos9/openstack-mariadb@sha256:ed0f8ba03f3ce47a32006d730c3049455325eb2c3b98b9fd6b3fb9901004df13,Command:[/bin/sh -c #!/bin/bash Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: MYSQL_REMOTE_HOST="" source /var/lib/operator-scripts/mysql_root_auth.sh Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: export DatabasePassword=${DatabasePassword:?"Please specify a DatabasePassword variable."} Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: MYSQL_CMD="mysql -h -u root -P 3306" Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: if [ -n "" ]; then Jan 23 07:19:14 crc kubenswrapper[5102]: GRANT_DATABASE="" Jan 23 07:19:14 crc kubenswrapper[5102]: else Jan 23 07:19:14 crc kubenswrapper[5102]: GRANT_DATABASE="*" Jan 23 07:19:14 crc kubenswrapper[5102]: fi Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: # going for maximum compatibility here: Jan 23 07:19:14 crc kubenswrapper[5102]: # 1. MySQL 8 no longer allows implicit create user when GRANT is used Jan 23 07:19:14 crc kubenswrapper[5102]: # 2. MariaDB has "CREATE OR REPLACE", but MySQL does not Jan 23 07:19:14 crc kubenswrapper[5102]: # 3. create user with CREATE but then do all password and TLS with ALTER to Jan 23 07:19:14 crc kubenswrapper[5102]: # support updates Jan 23 07:19:14 crc kubenswrapper[5102]: Jan 23 07:19:14 crc kubenswrapper[5102]: $MYSQL_CMD < logger="UnhandledError" Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.748241 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"mariadb-account-create-update\" with CreateContainerConfigError: \"secret \\\"openstack-mariadb-root-db-secret\\\" not found\"" pod="openstack/root-account-create-update-xrljw" podUID="f6b9e803-e21c-4609-ab6d-8e54e7cbe816" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.748879 5102 generic.go:334] "Generic (PLEG): container finished" podID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerID="da77c21b9df506f687f044080259f2d216b0315a1410a1b0676e52084c699b33" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.748941 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerDied","Data":"da77c21b9df506f687f044080259f2d216b0315a1410a1b0676e52084c699b33"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.750664 5102 generic.go:334] "Generic (PLEG): container finished" podID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerID="d16b0a4419002db2415cab085fc8a5390ea935e4fea5424b97b0f8ead9c68fef" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.750698 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerDied","Data":"d16b0a4419002db2415cab085fc8a5390ea935e4fea5424b97b0f8ead9c68fef"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.750715 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openstack/glance-default-external-api-0" event={"ID":"6c3459b4-efed-4868-8fd0-ffeb07f0100d","Type":"ContainerDied","Data":"7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.750727 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="7275eefaa50c0f547dc535e4091e47190a2783c71fe8af0be988690b146870a7" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.752456 5102 generic.go:334] "Generic (PLEG): container finished" podID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerID="4f979b76f22ef2e8f8509c19caa21930b8a908a0e7b25aba0b15129e8e286021" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.752492 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerDied","Data":"4f979b76f22ef2e8f8509c19caa21930b8a908a0e7b25aba0b15129e8e286021"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.752507 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/glance-default-internal-api-0" event={"ID":"302ce3d2-72f6-429c-b3cb-16e8fba0d04e","Type":"ContainerDied","Data":"1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.752516 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1424d3138b9fc171ad05c4e12d1b5bc51aa190950e5a6c16f8a8772396a4f512" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.756404 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/placement-5f55c94446-2fcrd" event={"ID":"35dab127-50f2-4f30-ba2f-68744d0a6ae8","Type":"ContainerDied","Data":"b7288f5f6274dd79e1a3005878fef3cfc5f379444935eb7909b3c44c8a67ca49"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.756425 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b7288f5f6274dd79e1a3005878fef3cfc5f379444935eb7909b3c44c8a67ca49" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.763237 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1a44c7a2-d363-4438-b9db-ebd62b910427/ovn-northd/0.log" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.763269 5102 generic.go:334] "Generic (PLEG): container finished" podID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" exitCode=139 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.763314 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerDied","Data":"fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.764989 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/kube-state-metrics-0" event={"ID":"ddb8da53-e17d-4c8d-a625-0d241d2caafd","Type":"ContainerDied","Data":"36a88a72098c84ef4ea7189f649900699e4b2e3ab7c027f93d8f04112c60d1ec"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.765005 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="36a88a72098c84ef4ea7189f649900699e4b2e3ab7c027f93d8f04112c60d1ec" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.776225 5102 generic.go:334] "Generic (PLEG): container finished" podID="66d1a45d-2635-496c-92c1-86e3a686c5b8" 
containerID="f4d4bf4c2380f1096c71e372015b67fe76544c3993ebfdd80decba57527e35ae" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.776300 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerDied","Data":"f4d4bf4c2380f1096c71e372015b67fe76544c3993ebfdd80decba57527e35ae"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.785653 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-gzxp8" event={"ID":"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d","Type":"ContainerDied","Data":"cd07c0b2036a38ddffd40b1c25ac187f97d30bf8e3d3b503357af3f840fd2bf3"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.785687 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd07c0b2036a38ddffd40b1c25ac187f97d30bf8e3d3b503357af3f840fd2bf3" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.798709 5102 generic.go:334] "Generic (PLEG): container finished" podID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerID="d878afc3004c8f5f3e7c7a5b43603e184202681b56d93e9496c6bc1f56835ac3" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.798762 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerDied","Data":"d878afc3004c8f5f3e7c7a5b43603e184202681b56d93e9496c6bc1f56835ac3"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.802060 5102 generic.go:334] "Generic (PLEG): container finished" podID="47725711-7e88-4c25-8016-f70488231203" containerID="4d7b6d603a00e934b80420f8abd17cabe70620c234e7bd375a48fc68ea87c3ac" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.802103 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerDied","Data":"4d7b6d603a00e934b80420f8abd17cabe70620c234e7bd375a48fc68ea87c3ac"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.802121 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-metadata-0" event={"ID":"47725711-7e88-4c25-8016-f70488231203","Type":"ContainerDied","Data":"1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.802132 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1bf64d3506452b71b3c6d755c297e01e41f0ce2c2aaf8cc84930acdf1c5bb730" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.806478 5102 generic.go:334] "Generic (PLEG): container finished" podID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerID="7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.806499 5102 generic.go:334] "Generic (PLEG): container finished" podID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerID="3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.806549 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerDied","Data":"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.806566 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" 
event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerDied","Data":"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc"} Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.831618 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.835556 5102 generic.go:334] "Generic (PLEG): container finished" podID="d0498339-2dc7-4527-8396-50bbd00b8443" containerID="88612d72f56f1267bf785aa1c9978b748c76a6d93eedc7a0d71e8ed0f2faec38" exitCode=0 Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.835632 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d0498339-2dc7-4527-8396-50bbd00b8443","Type":"ContainerDied","Data":"88612d72f56f1267bf785aa1c9978b748c76a6d93eedc7a0d71e8ed0f2faec38"} Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.838391 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.838436 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts podName:f6b9e803-e21c-4609-ab6d-8e54e7cbe816 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:15.338423371 +0000 UTC m=+1506.158772346 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts") pod "root-account-create-update-xrljw" (UID: "f6b9e803-e21c-4609-ab6d-8e54e7cbe816") : configmap "openstack-scripts" not found Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.838712 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.840319 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" cmd=["/usr/bin/pgrep","-r","DRST","nova-conductor"] Jan 23 07:19:14 crc kubenswrapper[5102]: E0123 07:19:14.840357 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/nova-cell0-conductor-0" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerName="nova-cell0-conductor-conductor" Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.841211 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-f350-account-create-update-h8rtr" event={"ID":"91ab8988-66ac-4643-b729-76d2575d0ad0","Type":"ContainerDied","Data":"d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0"} Jan 23 07:19:14 crc kubenswrapper[5102]: I0123 07:19:14.841236 5102 pod_container_deletor.go:80] "Container not found 
in pod's containers" containerID="d8b233317f27c289cc069464d0896a958f4ff44a9803a3330a4c763a670e10c0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.080825 5102 scope.go:117] "RemoveContainer" containerID="ff22da91cf223373d458a345f27d72a475d66d4886ff0e810b76172b7cd4967a" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.099389 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.117092 5102 scope.go:117] "RemoveContainer" containerID="4e4445ea0de06f9fa2ec0a6389c9a2952e55e6ae8854f80a494b563a6aab848e" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.126009 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.133813 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-f3ca-account-create-update-l5rzm"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.133812 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="unmounted volumes=[kube-api-access-4qgzn operator-scripts], unattached volumes=[], failed to process volumes=[]: context canceled" pod="openstack/keystone-8ed5-account-create-update-bdjvv" podUID="813e0bf4-6d9a-472b-950f-2cfe65163e22" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.147113 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2wt8t\" (UniqueName: \"kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t\") pod \"91ab8988-66ac-4643-b729-76d2575d0ad0\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.147312 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts\") pod \"91ab8988-66ac-4643-b729-76d2575d0ad0\" (UID: \"91ab8988-66ac-4643-b729-76d2575d0ad0\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.149747 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.151325 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "91ab8988-66ac-4643-b729-76d2575d0ad0" (UID: "91ab8988-66ac-4643-b729-76d2575d0ad0"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.155431 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-cell1-galera-0"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.158087 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t" (OuterVolumeSpecName: "kube-api-access-2wt8t") pod "91ab8988-66ac-4643-b729-76d2575d0ad0" (UID: "91ab8988-66ac-4643-b729-76d2575d0ad0"). InnerVolumeSpecName "kube-api-access-2wt8t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.174359 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.180678 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.182927 5102 scope.go:117] "RemoveContainer" containerID="c460a5da6c92f225aaabae26dde8398c051c62f311c254bc8f7ac9b476cfabcf" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.182942 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.199191 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.213583 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-8d62-account-create-update-cj4r8"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.229827 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.233920 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-scheduler-0"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.244626 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.246940 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259785 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259837 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259889 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v9nrp\" (UniqueName: \"kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp\") pod \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259926 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-h8fbj\" (UniqueName: \"kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259963 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts\") pod \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\" (UID: \"278bc0a5-d40a-4983-b8bd-ae5b8e6af12d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.259984 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260000 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260029 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260131 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260157 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260180 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260197 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260213 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260233 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260250 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260265 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"kube-api-access-j4kdl\" (UniqueName: \"kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl\") pod \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\" (UID: \"35dab127-50f2-4f30-ba2f-68744d0a6ae8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260289 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260339 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs\") pod \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\" (UID: \"bd9d4f50-cf1a-4235-8fc5-502b4a488cb8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.260698 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.261164 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/91ab8988-66ac-4643-b729-76d2575d0ad0-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.261180 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2wt8t\" (UniqueName: \"kubernetes.io/projected/91ab8988-66ac-4643-b729-76d2575d0ad0-kube-api-access-2wt8t\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.261304 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.261350 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:17.261336358 +0000 UTC m=+1508.081685333 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : configmap "openstack-scripts" not found Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.263601 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs" (OuterVolumeSpecName: "logs") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.266365 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs" (OuterVolumeSpecName: "logs") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.267300 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl" (OuterVolumeSpecName: "kube-api-access-j4kdl") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "kube-api-access-j4kdl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.267916 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj" (OuterVolumeSpecName: "kube-api-access-h8fbj") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "kube-api-access-h8fbj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.272670 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts" (OuterVolumeSpecName: "scripts") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.273380 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.275198 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "etc-machine-id". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.286262 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.309752 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.313201 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts" (OuterVolumeSpecName: "scripts") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.328591 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp" (OuterVolumeSpecName: "kube-api-access-v9nrp") pod "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d" (UID: "278bc0a5-d40a-4983-b8bd-ae5b8e6af12d"). InnerVolumeSpecName "kube-api-access-v9nrp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.352738 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.356484 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.356834 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.357205 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.357227 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data" (OuterVolumeSpecName: "config-data") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363299 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363342 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363370 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363422 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363441 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363463 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l2wmg\" (UniqueName: \"kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363489 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363507 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs\") pod \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363526 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363575 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hh95l\" (UniqueName: \"kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: 
\"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363601 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config\") pod \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363656 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363684 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle\") pod \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363713 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363738 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs\") pod \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\" (UID: \"6c3459b4-efed-4868-8fd0-ffeb07f0100d\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363756 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363780 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363800 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"glance\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363857 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jw455\" (UniqueName: \"kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455\") pod \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\" (UID: \"ddb8da53-e17d-4c8d-a625-0d241d2caafd\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.363913 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run\") pod \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\" (UID: \"302ce3d2-72f6-429c-b3cb-16e8fba0d04e\") " Jan 23 
07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364104 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4qgzn\" (UniqueName: \"kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn\") pod \"keystone-8ed5-account-create-update-bdjvv\" (UID: \"813e0bf4-6d9a-472b-950f-2cfe65163e22\") " pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364273 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364288 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364296 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364306 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j4kdl\" (UniqueName: \"kubernetes.io/projected/35dab127-50f2-4f30-ba2f-68744d0a6ae8-kube-api-access-j4kdl\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364316 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364325 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364333 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/35dab127-50f2-4f30-ba2f-68744d0a6ae8-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364341 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v9nrp\" (UniqueName: \"kubernetes.io/projected/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-kube-api-access-v9nrp\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364350 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-h8fbj\" (UniqueName: \"kubernetes.io/projected/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-kube-api-access-h8fbj\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364359 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364368 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364377 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: 
\"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.364385 5102 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.364445 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.364490 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts podName:f6b9e803-e21c-4609-ab6d-8e54e7cbe816 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:16.364475396 +0000 UTC m=+1507.184824371 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts") pod "root-account-create-update-xrljw" (UID: "f6b9e803-e21c-4609-ab6d-8e54e7cbe816") : configmap "openstack-scripts" not found Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.365199 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs" (OuterVolumeSpecName: "logs") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.365528 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.399077 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs" (OuterVolumeSpecName: "logs") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.402692 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run" (OuterVolumeSpecName: "httpd-run") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "httpd-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.407154 5102 projected.go:194] Error preparing data for projected volume kube-api-access-4qgzn for pod openstack/keystone-8ed5-account-create-update-bdjvv: failed to fetch token: serviceaccounts "galera-openstack" not found Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.407339 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn podName:813e0bf4-6d9a-472b-950f-2cfe65163e22 nodeName:}" failed. 
No retries permitted until 2026-01-23 07:19:17.407305745 +0000 UTC m=+1508.227654720 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-4qgzn" (UniqueName: "kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn") pod "keystone-8ed5-account-create-update-bdjvv" (UID: "813e0bf4-6d9a-472b-950f-2cfe65163e22") : failed to fetch token: serviceaccounts "galera-openstack" not found Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.414887 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage06-crc" (OuterVolumeSpecName: "glance") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "local-storage06-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.426255 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.436849 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg" (OuterVolumeSpecName: "kube-api-access-l2wmg") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "kube-api-access-l2wmg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.436927 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage11-crc" (OuterVolumeSpecName: "glance") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "local-storage11-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.437034 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts" (OuterVolumeSpecName: "scripts") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.439730 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.456850 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.459866 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1a44c7a2-d363-4438-b9db-ebd62b910427/ovn-northd/0.log" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.460163 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465288 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465380 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rz9gq\" (UniqueName: \"kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465475 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465522 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bvzfj\" (UniqueName: \"kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465565 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs\") pod \"47725711-7e88-4c25-8016-f70488231203\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465611 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465637 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs\") pod \"47725711-7e88-4c25-8016-f70488231203\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465655 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465704 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465732 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: 
\"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465960 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.465979 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle\") pod \"47725711-7e88-4c25-8016-f70488231203\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466021 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466054 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle\") pod \"84ff9e74-154d-4279-befe-109c03fb7c3b\" (UID: \"84ff9e74-154d-4279-befe-109c03fb7c3b\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466085 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data\") pod \"47725711-7e88-4c25-8016-f70488231203\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466128 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466146 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gh9lp\" (UniqueName: \"kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp\") pod \"47725711-7e88-4c25-8016-f70488231203\" (UID: \"47725711-7e88-4c25-8016-f70488231203\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466169 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle\") pod \"66d1a45d-2635-496c-92c1-86e3a686c5b8\" (UID: \"66d1a45d-2635-496c-92c1-86e3a686c5b8\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466503 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466524 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466546 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: 
\"kubernetes.io/empty-dir/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466557 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466565 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466573 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l2wmg\" (UniqueName: \"kubernetes.io/projected/6c3459b4-efed-4868-8fd0-ffeb07f0100d-kube-api-access-l2wmg\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466587 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.466596 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-run\" (UniqueName: \"kubernetes.io/empty-dir/6c3459b4-efed-4868-8fd0-ffeb07f0100d-httpd-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.470380 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455" (OuterVolumeSpecName: "kube-api-access-jw455") pod "ddb8da53-e17d-4c8d-a625-0d241d2caafd" (UID: "ddb8da53-e17d-4c8d-a625-0d241d2caafd"). InnerVolumeSpecName "kube-api-access-jw455". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.472561 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l" (OuterVolumeSpecName: "kube-api-access-hh95l") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "kube-api-access-hh95l". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.473984 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts" (OuterVolumeSpecName: "scripts") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.474286 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.474641 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs" (OuterVolumeSpecName: "logs") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.475267 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs" (OuterVolumeSpecName: "logs") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.479918 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs" (OuterVolumeSpecName: "logs") pod "47725711-7e88-4c25-8016-f70488231203" (UID: "47725711-7e88-4c25-8016-f70488231203"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.481899 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp" (OuterVolumeSpecName: "kube-api-access-gh9lp") pod "47725711-7e88-4c25-8016-f70488231203" (UID: "47725711-7e88-4c25-8016-f70488231203"). InnerVolumeSpecName "kube-api-access-gh9lp". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.489280 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.490911 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq" (OuterVolumeSpecName: "kube-api-access-rz9gq") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "kube-api-access-rz9gq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.494519 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj" (OuterVolumeSpecName: "kube-api-access-bvzfj") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "kube-api-access-bvzfj". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.508697 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config" (OuterVolumeSpecName: "kube-state-metrics-tls-config") pod "ddb8da53-e17d-4c8d-a625-0d241d2caafd" (UID: "ddb8da53-e17d-4c8d-a625-0d241d2caafd"). InnerVolumeSpecName "kube-state-metrics-tls-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.565700 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). 
InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.569378 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs\") pod \"d0498339-2dc7-4527-8396-50bbd00b8443\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.569441 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9rnvx\" (UniqueName: \"kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx\") pod \"57f488ce-4b72-40f4-82d8-ad074776c306\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.569482 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data\") pod \"57f488ce-4b72-40f4-82d8-ad074776c306\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.570324 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data\") pod \"d0498339-2dc7-4527-8396-50bbd00b8443\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.570356 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config\") pod \"d0498339-2dc7-4527-8396-50bbd00b8443\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.570394 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom\") pod \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.571110 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.571131 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle\") pod \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.571782 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data" (OuterVolumeSpecName: "config-data") pod "d0498339-2dc7-4527-8396-50bbd00b8443" (UID: "d0498339-2dc7-4527-8396-50bbd00b8443"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.572242 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "d0498339-2dc7-4527-8396-50bbd00b8443" (UID: "d0498339-2dc7-4527-8396-50bbd00b8443"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.572826 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts" (OuterVolumeSpecName: "scripts") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575312 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kwwmw\" (UniqueName: \"kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw\") pod \"d0498339-2dc7-4527-8396-50bbd00b8443\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575366 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xpfw8\" (UniqueName: \"kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575384 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575420 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs\") pod \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575439 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575485 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle\") pod \"57f488ce-4b72-40f4-82d8-ad074776c306\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575560 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle\") pod \"d0498339-2dc7-4527-8396-50bbd00b8443\" (UID: \"d0498339-2dc7-4527-8396-50bbd00b8443\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575671 5102 reconciler_common.go:159] 
"operationExecutor.UnmountVolume started for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs\") pod \"57f488ce-4b72-40f4-82d8-ad074776c306\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575705 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom\") pod \"57f488ce-4b72-40f4-82d8-ad074776c306\" (UID: \"57f488ce-4b72-40f4-82d8-ad074776c306\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575738 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data\") pod \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575762 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575788 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575809 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir\") pod \"1a44c7a2-d363-4438-b9db-ebd62b910427\" (UID: \"1a44c7a2-d363-4438-b9db-ebd62b910427\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575852 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ssvr6\" (UniqueName: \"kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6\") pod \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\" (UID: \"6abef536-ae8a-4a68-9c29-87a9af5aaee6\") " Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.575995 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx" (OuterVolumeSpecName: "kube-api-access-9rnvx") pod "57f488ce-4b72-40f4-82d8-ad074776c306" (UID: "57f488ce-4b72-40f4-82d8-ad074776c306"). InnerVolumeSpecName "kube-api-access-9rnvx". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576525 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hh95l\" (UniqueName: \"kubernetes.io/projected/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-kube-api-access-hh95l\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576632 5102 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-config\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576647 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/66d1a45d-2635-496c-92c1-86e3a686c5b8-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576660 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gh9lp\" (UniqueName: \"kubernetes.io/projected/47725711-7e88-4c25-8016-f70488231203-kube-api-access-gh9lp\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576672 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576682 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576694 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rz9gq\" (UniqueName: \"kubernetes.io/projected/66d1a45d-2635-496c-92c1-86e3a686c5b8-kube-api-access-rz9gq\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576706 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-9rnvx\" (UniqueName: \"kubernetes.io/projected/57f488ce-4b72-40f4-82d8-ad074776c306-kube-api-access-9rnvx\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576717 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576727 5102 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/d0498339-2dc7-4527-8396-50bbd00b8443-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576738 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bvzfj\" (UniqueName: \"kubernetes.io/projected/84ff9e74-154d-4279-befe-109c03fb7c3b-kube-api-access-bvzfj\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576749 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576760 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jw455\" (UniqueName: 
\"kubernetes.io/projected/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-api-access-jw455\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576772 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576783 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/47725711-7e88-4c25-8016-f70488231203-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.576794 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/84ff9e74-154d-4279-befe-109c03fb7c3b-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.577069 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-config-data: configmap "rabbitmq-config-data" not found Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.577118 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data podName:f4fc3e1d-5fac-4696-a8eb-709db37b5ff6 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:23.577100259 +0000 UTC m=+1514.397449244 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data") pod "rabbitmq-server-0" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6") : configmap "rabbitmq-config-data" not found Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.580342 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs" (OuterVolumeSpecName: "logs") pod "57f488ce-4b72-40f4-82d8-ad074776c306" (UID: "57f488ce-4b72-40f4-82d8-ad074776c306"). InnerVolumeSpecName "logs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.583909 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config" (OuterVolumeSpecName: "config") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.585353 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir" (OuterVolumeSpecName: "ovn-rundir") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "ovn-rundir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.586344 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs" (OuterVolumeSpecName: "logs") pod "6abef536-ae8a-4a68-9c29-87a9af5aaee6" (UID: "6abef536-ae8a-4a68-9c29-87a9af5aaee6"). InnerVolumeSpecName "logs". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.602094 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8" (OuterVolumeSpecName: "kube-api-access-xpfw8") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "kube-api-access-xpfw8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.604738 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "6abef536-ae8a-4a68-9c29-87a9af5aaee6" (UID: "6abef536-ae8a-4a68-9c29-87a9af5aaee6"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.615720 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0e1fd671-9192-4406-b7ea-3a33b4cdec57" path="/var/lib/kubelet/pods/0e1fd671-9192-4406-b7ea-3a33b4cdec57/volumes" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.621629 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw" (OuterVolumeSpecName: "kube-api-access-kwwmw") pod "d0498339-2dc7-4527-8396-50bbd00b8443" (UID: "d0498339-2dc7-4527-8396-50bbd00b8443"). InnerVolumeSpecName "kube-api-access-kwwmw". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.634768 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "57f488ce-4b72-40f4-82d8-ad074776c306" (UID: "57f488ce-4b72-40f4-82d8-ad074776c306"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.637111 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="599ccc3d-2e89-48e4-9db2-394cfd4364dc" path="/var/lib/kubelet/pods/599ccc3d-2e89-48e4-9db2-394cfd4364dc/volumes" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.637762 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="85d1a521-2a0e-4bfa-adf9-5f7ab24d936f" path="/var/lib/kubelet/pods/85d1a521-2a0e-4bfa-adf9-5f7ab24d936f/volumes" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.638641 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a8fe4d72-b09b-4158-b4ea-c59192dbc956" path="/var/lib/kubelet/pods/a8fe4d72-b09b-4158-b4ea-c59192dbc956/volumes" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.639112 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ebe64681-d043-4b12-b1ae-2306ef0e294f" path="/var/lib/kubelet/pods/ebe64681-d043-4b12-b1ae-2306ef0e294f/volumes" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.648390 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6" (OuterVolumeSpecName: "kube-api-access-ssvr6") pod "6abef536-ae8a-4a68-9c29-87a9af5aaee6" (UID: "6abef536-ae8a-4a68-9c29-87a9af5aaee6"). InnerVolumeSpecName "kube-api-access-ssvr6". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678758 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678790 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1a44c7a2-d363-4438-b9db-ebd62b910427-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678802 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-rundir\" (UniqueName: \"kubernetes.io/empty-dir/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-rundir\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678811 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ssvr6\" (UniqueName: \"kubernetes.io/projected/6abef536-ae8a-4a68-9c29-87a9af5aaee6-kube-api-access-ssvr6\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678822 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678831 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kwwmw\" (UniqueName: \"kubernetes.io/projected/d0498339-2dc7-4527-8396-50bbd00b8443-kube-api-access-kwwmw\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678841 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xpfw8\" (UniqueName: \"kubernetes.io/projected/1a44c7a2-d363-4438-b9db-ebd62b910427-kube-api-access-xpfw8\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678849 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/6abef536-ae8a-4a68-9c29-87a9af5aaee6-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.678857 5102 reconciler_common.go:293] "Volume detached for volume \"logs\" (UniqueName: \"kubernetes.io/empty-dir/57f488ce-4b72-40f4-82d8-ad074776c306-logs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.684578 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data" (OuterVolumeSpecName: "config-data") pod "47725711-7e88-4c25-8016-f70488231203" (UID: "47725711-7e88-4c25-8016-f70488231203"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.708948 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.710258 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data" (OuterVolumeSpecName: "config-data") pod "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" (UID: "bd9d4f50-cf1a-4235-8fc5-502b4a488cb8"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.767641 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage11-crc" (UniqueName: "kubernetes.io/local-volume/local-storage11-crc") on node "crc" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.768368 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "d0498339-2dc7-4527-8396-50bbd00b8443" (UID: "d0498339-2dc7-4527-8396-50bbd00b8443"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.769531 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "ddb8da53-e17d-4c8d-a625-0d241d2caafd" (UID: "ddb8da53-e17d-4c8d-a625-0d241d2caafd"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.791962 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.792051 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.792154 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.792193 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.792203 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage11-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage11-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.792212 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc 
kubenswrapper[5102]: I0123 07:19:15.792222 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.792236 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.800054 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.800204 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.800675 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.800756 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.801813 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:15 crc kubenswrapper[5102]: E0123 07:19:15.801908 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.827022 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.854616 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-northd-0_1a44c7a2-d363-4438-b9db-ebd62b910427/ovn-northd/0.log" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.854962 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-northd-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.860839 5102 generic.go:334] "Generic (PLEG): container finished" podID="57f488ce-4b72-40f4-82d8-ad074776c306" containerID="7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc" exitCode=0 Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.860971 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.864834 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-worker-5f48c766d5-kqw8p" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.866307 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/barbican-api-f86b8db9b-zlplv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.872397 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.875012 5102 generic.go:334] "Generic (PLEG): container finished" podID="6c65ea3f-14be-4130-b116-2291c114323e" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" exitCode=0 Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.880631 5102 generic.go:334] "Generic (PLEG): container finished" podID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" exitCode=0 Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.882969 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.883438 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/memcached-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.884144 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/placement-5f55c94446-2fcrd" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.884661 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.887218 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-f350-account-create-update-h8rtr" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.887775 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/glance-default-external-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.888255 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/kube-state-metrics-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.888296 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/glance-default-internal-api-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.888359 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-metadata-0" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.888258 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-gzxp8" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.887216 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.896633 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.896656 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.935885 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6abef536-ae8a-4a68-9c29-87a9af5aaee6" (UID: "6abef536-ae8a-4a68-9c29-87a9af5aaee6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.945333 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs" (OuterVolumeSpecName: "kube-state-metrics-tls-certs") pod "ddb8da53-e17d-4c8d-a625-0d241d2caafd" (UID: "ddb8da53-e17d-4c8d-a625-0d241d2caafd"). InnerVolumeSpecName "kube-state-metrics-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.968555 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.970774 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data" (OuterVolumeSpecName: "config-data") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.992652 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.996225 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data" (OuterVolumeSpecName: "config-data") pod "6abef536-ae8a-4a68-9c29-87a9af5aaee6" (UID: "6abef536-ae8a-4a68-9c29-87a9af5aaee6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.998871 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.998904 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.998915 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.998969 5102 reconciler_common.go:293] "Volume detached for volume \"kube-state-metrics-tls-certs\" (UniqueName: \"kubernetes.io/secret/ddb8da53-e17d-4c8d-a625-0d241d2caafd-kube-state-metrics-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.999009 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6abef536-ae8a-4a68-9c29-87a9af5aaee6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:15 crc kubenswrapper[5102]: I0123 07:19:15.999018 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.004328 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.016301 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage06-crc" (UniqueName: "kubernetes.io/local-volume/local-storage06-crc") on node "crc" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.032022 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.033308 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data" (OuterVolumeSpecName: "config-data") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.087241 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.088259 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.091662 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "57f488ce-4b72-40f4-82d8-ad074776c306" (UID: "57f488ce-4b72-40f4-82d8-ad074776c306"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.097470 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "47725711-7e88-4c25-8016-f70488231203" (UID: "47725711-7e88-4c25-8016-f70488231203"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100483 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage06-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage06-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100513 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100524 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100570 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100585 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100594 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100603 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.100614 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.130809 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs" (OuterVolumeSpecName: "ovn-northd-tls-certs") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "ovn-northd-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.133801 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.158887 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.173879 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.183815 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data" (OuterVolumeSpecName: "config-data") pod "6c3459b4-efed-4868-8fd0-ffeb07f0100d" (UID: "6c3459b4-efed-4868-8fd0-ffeb07f0100d"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.202747 5102 reconciler_common.go:293] "Volume detached for volume \"ovn-northd-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-ovn-northd-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.202776 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c3459b4-efed-4868-8fd0-ffeb07f0100d-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.202786 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209093 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-northd-0" event={"ID":"1a44c7a2-d363-4438-b9db-ebd62b910427","Type":"ContainerDied","Data":"e564424f1c4d0233e925a77b797ea629df0c96a2b1b0915616fdf829a826b2cb"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209394 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerDied","Data":"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209409 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-keystone-listener-57649777bb-wl6hv" event={"ID":"57f488ce-4b72-40f4-82d8-ad074776c306","Type":"ContainerDied","Data":"336f61f7256e8da64e4504e408d7f28b4158e344d35dc10ca4cbc92c0b71073e"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209426 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-worker-5f48c766d5-kqw8p" event={"ID":"6abef536-ae8a-4a68-9c29-87a9af5aaee6","Type":"ContainerDied","Data":"cd4bc9de53c6d3ebbca38bdbd32be0043c5c807fc468bf8946363360d6ae1874"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209440 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/barbican-api-f86b8db9b-zlplv" event={"ID":"84ff9e74-154d-4279-befe-109c03fb7c3b","Type":"ContainerDied","Data":"f0eb75d7f57a59cc28eabe688663cc93fbca89d4f9b8db35dbd7d41944090f4d"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209455 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-api-0" event={"ID":"66d1a45d-2635-496c-92c1-86e3a686c5b8","Type":"ContainerDied","Data":"c206bf1e7550deabf28d51b5a89a13aaeff7642c2c78bcedc20b956283e451df"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209466 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6c65ea3f-14be-4130-b116-2291c114323e","Type":"ContainerDied","Data":"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209476 5102 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell1-conductor-0" event={"ID":"6c65ea3f-14be-4130-b116-2291c114323e","Type":"ContainerDied","Data":"59b32d5bb78bc24a7466a3a43cd2d5e32a35e2aa5e1fd3029a30083169caef7a"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209487 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1893371f-b289-4336-a8ed-1bd78e9191b6","Type":"ContainerDied","Data":"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209497 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/nova-cell0-conductor-0" event={"ID":"1893371f-b289-4336-a8ed-1bd78e9191b6","Type":"ContainerDied","Data":"3947eb90e2166603284232579a4b0679a135b85e23b7fee918e28d6ab6d01729"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/memcached-0" event={"ID":"d0498339-2dc7-4527-8396-50bbd00b8443","Type":"ContainerDied","Data":"202d67de3f27e495bfa00ce900d66d430218f5ccba72ca8ee84ed2602b04b34e"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.209523 5102 scope.go:117] "RemoveContainer" containerID="28fd2f580f926860b97dac693969e9dcc8ef486d9834e279e718727164266b75" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.218480 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.241122 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data" (OuterVolumeSpecName: "config-data") pod "84ff9e74-154d-4279-befe-109c03fb7c3b" (UID: "84ff9e74-154d-4279-befe-109c03fb7c3b"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.253664 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.258560 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "35dab127-50f2-4f30-ba2f-68744d0a6ae8" (UID: "35dab127-50f2-4f30-ba2f-68744d0a6ae8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.261975 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-gzxp8"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.270801 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data" (OuterVolumeSpecName: "config-data") pod "57f488ce-4b72-40f4-82d8-ad074776c306" (UID: "57f488ce-4b72-40f4-82d8-ad074776c306"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.285108 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs" (OuterVolumeSpecName: "memcached-tls-certs") pod "d0498339-2dc7-4527-8396-50bbd00b8443" (UID: "d0498339-2dc7-4527-8396-50bbd00b8443"). InnerVolumeSpecName "memcached-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.288697 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs" (OuterVolumeSpecName: "nova-metadata-tls-certs") pod "47725711-7e88-4c25-8016-f70488231203" (UID: "47725711-7e88-4c25-8016-f70488231203"). InnerVolumeSpecName "nova-metadata-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.291735 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs" (OuterVolumeSpecName: "metrics-certs-tls-certs") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "metrics-certs-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.292993 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "66d1a45d-2635-496c-92c1-86e3a686c5b8" (UID: "66d1a45d-2635-496c-92c1-86e3a686c5b8"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.296531 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "302ce3d2-72f6-429c-b3cb-16e8fba0d04e" (UID: "302ce3d2-72f6-429c-b3cb-16e8fba0d04e"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.297839 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1a44c7a2-d363-4438-b9db-ebd62b910427" (UID: "1a44c7a2-d363-4438-b9db-ebd62b910427"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.305893 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data\") pod \"1893371f-b289-4336-a8ed-1bd78e9191b6\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.306048 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p75nc\" (UniqueName: \"kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc\") pod \"6c65ea3f-14be-4130-b116-2291c114323e\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.306081 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bcwvv\" (UniqueName: \"kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv\") pod \"1893371f-b289-4336-a8ed-1bd78e9191b6\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.306138 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle\") pod \"6c65ea3f-14be-4130-b116-2291c114323e\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.306529 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data\") pod \"6c65ea3f-14be-4130-b116-2291c114323e\" (UID: \"6c65ea3f-14be-4130-b116-2291c114323e\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.306647 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle\") pod \"1893371f-b289-4336-a8ed-1bd78e9191b6\" (UID: \"1893371f-b289-4336-a8ed-1bd78e9191b6\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307154 5102 reconciler_common.go:293] "Volume detached for volume \"nova-metadata-tls-certs\" (UniqueName: \"kubernetes.io/secret/47725711-7e88-4c25-8016-f70488231203-nova-metadata-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307216 5102 reconciler_common.go:293] "Volume detached for volume \"metrics-certs-tls-certs\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-metrics-certs-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307228 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/84ff9e74-154d-4279-befe-109c03fb7c3b-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307254 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/66d1a45d-2635-496c-92c1-86e3a686c5b8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307264 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/302ce3d2-72f6-429c-b3cb-16e8fba0d04e-internal-tls-certs\") on node \"crc\" 
DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307273 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1a44c7a2-d363-4438-b9db-ebd62b910427-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307280 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/35dab127-50f2-4f30-ba2f-68744d0a6ae8-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307289 5102 reconciler_common.go:293] "Volume detached for volume \"memcached-tls-certs\" (UniqueName: \"kubernetes.io/secret/d0498339-2dc7-4527-8396-50bbd00b8443-memcached-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.307339 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/57f488ce-4b72-40f4-82d8-ad074776c306-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.310985 5102 scope.go:117] "RemoveContainer" containerID="fe2fdcbbc8084d5d0d6d55bfdbc85ec6f4dcb7d8044b5db7564aa75cf000dec3" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.318611 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv" (OuterVolumeSpecName: "kube-api-access-bcwvv") pod "1893371f-b289-4336-a8ed-1bd78e9191b6" (UID: "1893371f-b289-4336-a8ed-1bd78e9191b6"). InnerVolumeSpecName "kube-api-access-bcwvv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.321039 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.327221 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-f350-account-create-update-h8rtr"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.334191 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.334842 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc" (OuterVolumeSpecName: "kube-api-access-p75nc") pod "6c65ea3f-14be-4130-b116-2291c114323e" (UID: "6c65ea3f-14be-4130-b116-2291c114323e"). InnerVolumeSpecName "kube-api-access-p75nc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.339844 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1893371f-b289-4336-a8ed-1bd78e9191b6" (UID: "1893371f-b289-4336-a8ed-1bd78e9191b6"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.340617 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data" (OuterVolumeSpecName: "config-data") pod "1893371f-b289-4336-a8ed-1bd78e9191b6" (UID: "1893371f-b289-4336-a8ed-1bd78e9191b6"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.340770 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.347259 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.372076 5102 scope.go:117] "RemoveContainer" containerID="7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.374702 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-worker-5f48c766d5-kqw8p"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.382466 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "6c65ea3f-14be-4130-b116-2291c114323e" (UID: "6c65ea3f-14be-4130-b116-2291c114323e"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.387411 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data" (OuterVolumeSpecName: "config-data") pod "6c65ea3f-14be-4130-b116-2291c114323e" (UID: "6c65ea3f-14be-4130-b116-2291c114323e"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.415937 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.415969 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.415979 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/1893371f-b289-4336-a8ed-1bd78e9191b6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: E0123 07:19:16.415979 5102 configmap.go:193] Couldn't get configMap openstack/openstack-scripts: configmap "openstack-scripts" not found Jan 23 07:19:16 crc kubenswrapper[5102]: E0123 07:19:16.416044 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts podName:f6b9e803-e21c-4609-ab6d-8e54e7cbe816 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:18.416026056 +0000 UTC m=+1509.236375021 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "operator-scripts" (UniqueName: "kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts") pod "root-account-create-update-xrljw" (UID: "f6b9e803-e21c-4609-ab6d-8e54e7cbe816") : configmap "openstack-scripts" not found Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.415988 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p75nc\" (UniqueName: \"kubernetes.io/projected/6c65ea3f-14be-4130-b116-2291c114323e-kube-api-access-p75nc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.416401 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bcwvv\" (UniqueName: \"kubernetes.io/projected/1893371f-b289-4336-a8ed-1bd78e9191b6-kube-api-access-bcwvv\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.416415 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/6c65ea3f-14be-4130-b116-2291c114323e-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.419084 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.430592 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/kube-state-metrics-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.437866 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.447614 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.477070 5102 scope.go:117] "RemoveContainer" containerID="0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.477295 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-external-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.514756 5102 scope.go:117] "RemoveContainer" containerID="7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc" Jan 23 07:19:16 crc kubenswrapper[5102]: E0123 07:19:16.515170 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc\": container with ID starting with 7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc not found: ID does not exist" containerID="7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.515233 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc"} err="failed to get container status \"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc\": rpc error: code = NotFound desc = could not find container \"7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc\": container with ID starting with 7ca6418122b65ad1291e200dbbaee52d7b2e9e35a62641a60f168aa5a918bccc not found: ID does not exist" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.515264 5102 scope.go:117] "RemoveContainer" containerID="0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577" Jan 
23 07:19:16 crc kubenswrapper[5102]: E0123 07:19:16.515767 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577\": container with ID starting with 0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577 not found: ID does not exist" containerID="0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.516600 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577"} err="failed to get container status \"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577\": rpc error: code = NotFound desc = could not find container \"0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577\": container with ID starting with 0972fb31ddaeb302af69485ace5b2085305a01671ca61efd67807da334b76577 not found: ID does not exist" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.516616 5102 scope.go:117] "RemoveContainer" containerID="d878afc3004c8f5f3e7c7a5b43603e184202681b56d93e9496c6bc1f56835ac3" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.517996 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts\") pod \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.518065 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5lq85\" (UniqueName: \"kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85\") pod \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\" (UID: \"f6b9e803-e21c-4609-ab6d-8e54e7cbe816\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.519096 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "f6b9e803-e21c-4609-ab6d-8e54e7cbe816" (UID: "f6b9e803-e21c-4609-ab6d-8e54e7cbe816"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.530074 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85" (OuterVolumeSpecName: "kube-api-access-5lq85") pod "f6b9e803-e21c-4609-ab6d-8e54e7cbe816" (UID: "f6b9e803-e21c-4609-ab6d-8e54e7cbe816"). InnerVolumeSpecName "kube-api-access-5lq85". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.547831 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.564629 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-keystone-listener-57649777bb-wl6hv"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.573589 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.580967 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-northd-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.629595 5102 scope.go:117] "RemoveContainer" containerID="6070b8a73f605944bd3a15d8db62c51a5d73dffd518f94ec4f6ed403ec5ef669" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.631780 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5lq85\" (UniqueName: \"kubernetes.io/projected/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-kube-api-access-5lq85\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.631832 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/f6b9e803-e21c-4609-ab6d-8e54e7cbe816-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.648391 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.665778 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/barbican-api-f86b8db9b-zlplv"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.674550 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.696150 5102 scope.go:117] "RemoveContainer" containerID="da77c21b9df506f687f044080259f2d216b0315a1410a1b0676e52084c699b33" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.696359 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/placement-5f55c94446-2fcrd"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.738620 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.751550 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/glance-default-internal-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.757985 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.764653 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-api-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.771698 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/memcached-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.773175 5102 scope.go:117] "RemoveContainer" containerID="34cd1354e2d8d1a8790a5b6bcf27425452892c21859624035093db53b7f4bf45" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.778748 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/memcached-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.795585 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:19:16 crc 
kubenswrapper[5102]: I0123 07:19:16.813057 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-metadata-0"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.833854 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.848937 5102 scope.go:117] "RemoveContainer" containerID="f4d4bf4c2380f1096c71e372015b67fe76544c3993ebfdd80decba57527e35ae" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.888450 5102 scope.go:117] "RemoveContainer" containerID="c00e7ef9f103d8dba5d16c6b74a391b96b09bc3b06eaa21681a7b180182aaf05" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.901682 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/root-account-create-update-xrljw" event={"ID":"f6b9e803-e21c-4609-ab6d-8e54e7cbe816","Type":"ContainerDied","Data":"e9a202e6c15d394df95bd3e24d3feabeefd0ae036baeced331e8bd73dc6b64fc"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.901760 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/root-account-create-update-xrljw" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910669 5102 generic.go:334] "Generic (PLEG): container finished" podID="1ede537b-39d8-483c-9a2d-4ace36319060" containerID="80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8" exitCode=0 Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910729 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/openstack-galera-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910779 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-8ed5-account-create-update-bdjvv" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910835 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerDied","Data":"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910881 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/openstack-galera-0" event={"ID":"1ede537b-39d8-483c-9a2d-4ace36319060","Type":"ContainerDied","Data":"0614d0b2ed67db0f09f8d59a72622b249e926e0294d6c40016ccf2f83dfbec87"} Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.910959 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/nova-cell1-conductor-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.911417 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/nova-cell0-conductor-0" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.924456 5102 scope.go:117] "RemoveContainer" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938147 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mysql-db\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938602 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938671 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938691 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxxtl\" (UniqueName: \"kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938757 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938816 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938855 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.938915 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated\") pod \"1ede537b-39d8-483c-9a2d-4ace36319060\" (UID: \"1ede537b-39d8-483c-9a2d-4ace36319060\") " Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.941168 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated" (OuterVolumeSpecName: "config-data-generated") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "config-data-generated". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.941489 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts" (OuterVolumeSpecName: "operator-scripts") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "operator-scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.942490 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default" (OuterVolumeSpecName: "config-data-default") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "config-data-default". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.942816 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config" (OuterVolumeSpecName: "kolla-config") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "kolla-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.954766 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl" (OuterVolumeSpecName: "kube-api-access-kxxtl") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "kube-api-access-kxxtl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.963061 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/root-account-create-update-xrljw"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.964409 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage03-crc" (OuterVolumeSpecName: "mysql-db") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "local-storage03-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.969125 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/root-account-create-update-xrljw"] Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.984022 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.995782 5102 scope.go:117] "RemoveContainer" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" Jan 23 07:19:16 crc kubenswrapper[5102]: E0123 07:19:16.996438 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7\": container with ID starting with d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7 not found: ID does not exist" containerID="d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.996492 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7"} err="failed to get container status \"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7\": rpc error: code = NotFound desc = could not find container \"d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7\": container with ID starting with d1fd5aa41a93777b70c132b200142359aa9f5b98ea5e207bb9f22d97afdc05f7 not found: ID does not exist" Jan 23 07:19:16 crc kubenswrapper[5102]: I0123 07:19:16.996643 5102 scope.go:117] "RemoveContainer" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.032148 5102 scope.go:117] "RemoveContainer" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" Jan 23 07:19:17 crc kubenswrapper[5102]: E0123 07:19:17.034023 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605\": container with ID starting with ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605 not found: ID does not exist" containerID="ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.034068 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605"} err="failed to get container status \"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605\": rpc error: code = NotFound desc = could not find container \"ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605\": container with ID starting with ba94f4284a892eeccc7f71327d741b59cffd59cc4c2b70941f0d71b850ab8605 not found: ID does not exist" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.034101 5102 scope.go:117] "RemoveContainer" containerID="88612d72f56f1267bf785aa1c9978b748c76a6d93eedc7a0d71e8ed0f2faec38" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.043526 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044396 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-default\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-default\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044432 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxxtl\" (UniqueName: 
\"kubernetes.io/projected/1ede537b-39d8-483c-9a2d-4ace36319060-kube-api-access-kxxtl\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044445 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044454 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044464 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-generated\" (UniqueName: \"kubernetes.io/empty-dir/1ede537b-39d8-483c-9a2d-4ace36319060-config-data-generated\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044487 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.044497 5102 reconciler_common.go:293] "Volume detached for volume \"kolla-config\" (UniqueName: \"kubernetes.io/configmap/1ede537b-39d8-483c-9a2d-4ace36319060-kolla-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.056054 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell1-conductor-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.068015 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs" (OuterVolumeSpecName: "galera-tls-certs") pod "1ede537b-39d8-483c-9a2d-4ace36319060" (UID: "1ede537b-39d8-483c-9a2d-4ace36319060"). InnerVolumeSpecName "galera-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.091573 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage03-crc" (UniqueName: "kubernetes.io/local-volume/local-storage03-crc") on node "crc" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.092576 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-8ed5-account-create-update-bdjvv"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.104350 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-8ed5-account-create-update-bdjvv"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.110402 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.111773 5102 scope.go:117] "RemoveContainer" containerID="80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.117467 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/nova-cell0-conductor-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: E0123 07:19:17.146020 5102 configmap.go:193] Couldn't get configMap openstack/rabbitmq-cell1-config-data: configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:17 crc kubenswrapper[5102]: E0123 07:19:17.146092 5102 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data podName:1ea732e7-d11d-4e12-9d44-f8fcafa50de5 nodeName:}" failed. No retries permitted until 2026-01-23 07:19:25.146072795 +0000 UTC m=+1515.966421770 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "config-data" (UniqueName: "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data") pod "rabbitmq-cell1-server-0" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5") : configmap "rabbitmq-cell1-config-data" not found Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.146502 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage03-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage03-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.146517 5102 reconciler_common.go:293] "Volume detached for volume \"galera-tls-certs\" (UniqueName: \"kubernetes.io/secret/1ede537b-39d8-483c-9a2d-4ace36319060-galera-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.180469 5102 scope.go:117] "RemoveContainer" containerID="433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.214807 5102 scope.go:117] "RemoveContainer" containerID="80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8" Jan 23 07:19:17 crc kubenswrapper[5102]: E0123 07:19:17.215721 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8\": container with ID starting with 80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8 not found: ID does not exist" containerID="80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.215794 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8"} 
err="failed to get container status \"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8\": rpc error: code = NotFound desc = could not find container \"80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8\": container with ID starting with 80639ffff087c66a76274797ca1622d2b485c1533fadb006c1ccb0249ce25ce8 not found: ID does not exist" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.215852 5102 scope.go:117] "RemoveContainer" containerID="433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c" Jan 23 07:19:17 crc kubenswrapper[5102]: E0123 07:19:17.216365 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c\": container with ID starting with 433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c not found: ID does not exist" containerID="433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.216388 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c"} err="failed to get container status \"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c\": rpc error: code = NotFound desc = could not find container \"433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c\": container with ID starting with 433252565f07dc50a09aed6e1c4e01a887475b2aff9868b19b785dff5c1f818c not found: ID does not exist" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.247747 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/openstack-galera-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.248181 5102 reconciler_common.go:293] "Volume detached for volume \"operator-scripts\" (UniqueName: \"kubernetes.io/configmap/813e0bf4-6d9a-472b-950f-2cfe65163e22-operator-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.248257 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4qgzn\" (UniqueName: \"kubernetes.io/projected/813e0bf4-6d9a-472b-950f-2cfe65163e22-kube-api-access-4qgzn\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.255768 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/openstack-galera-0"] Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.443335 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554190 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554257 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554288 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554310 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554369 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554386 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554407 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554425 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qkxwl\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554471 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.554511 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: 
\"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.555142 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.555186 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.555232 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls\") pod \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\" (UID: \"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.556438 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.556507 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.556529 5102 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.559124 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "rabbitmq-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.559917 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl" (OuterVolumeSpecName: "kube-api-access-qkxwl") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "kube-api-access-qkxwl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.561002 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage01-crc" (OuterVolumeSpecName: "persistence") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "local-storage01-crc". 
PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.562128 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.564235 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info" (OuterVolumeSpecName: "pod-info") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.578451 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data" (OuterVolumeSpecName: "config-data") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.607114 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf" (OuterVolumeSpecName: "server-conf") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.615560 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" path="/var/lib/kubelet/pods/1893371f-b289-4336-a8ed-1bd78e9191b6/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.616601 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" path="/var/lib/kubelet/pods/1a44c7a2-d363-4438-b9db-ebd62b910427/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.617801 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" path="/var/lib/kubelet/pods/1ede537b-39d8-483c-9a2d-4ace36319060/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.619575 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="278bc0a5-d40a-4983-b8bd-ae5b8e6af12d" path="/var/lib/kubelet/pods/278bc0a5-d40a-4983-b8bd-ae5b8e6af12d/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.620363 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" path="/var/lib/kubelet/pods/302ce3d2-72f6-429c-b3cb-16e8fba0d04e/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.621536 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" path="/var/lib/kubelet/pods/35dab127-50f2-4f30-ba2f-68744d0a6ae8/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.623505 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="47725711-7e88-4c25-8016-f70488231203" 
path="/var/lib/kubelet/pods/47725711-7e88-4c25-8016-f70488231203/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.624571 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" path="/var/lib/kubelet/pods/57f488ce-4b72-40f4-82d8-ad074776c306/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.627125 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" path="/var/lib/kubelet/pods/66d1a45d-2635-496c-92c1-86e3a686c5b8/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.628321 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" path="/var/lib/kubelet/pods/6abef536-ae8a-4a68-9c29-87a9af5aaee6/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.630219 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" path="/var/lib/kubelet/pods/6c3459b4-efed-4868-8fd0-ffeb07f0100d/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.633131 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6c65ea3f-14be-4130-b116-2291c114323e" path="/var/lib/kubelet/pods/6c65ea3f-14be-4130-b116-2291c114323e/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.636846 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="813e0bf4-6d9a-472b-950f-2cfe65163e22" path="/var/lib/kubelet/pods/813e0bf4-6d9a-472b-950f-2cfe65163e22/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.638126 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" path="/var/lib/kubelet/pods/84ff9e74-154d-4279-befe-109c03fb7c3b/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.639207 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="91ab8988-66ac-4643-b729-76d2575d0ad0" path="/var/lib/kubelet/pods/91ab8988-66ac-4643-b729-76d2575d0ad0/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.640219 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" path="/var/lib/kubelet/pods/bd9d4f50-cf1a-4235-8fc5-502b4a488cb8/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.642417 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" path="/var/lib/kubelet/pods/d0498339-2dc7-4527-8396-50bbd00b8443/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.643576 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" path="/var/lib/kubelet/pods/ddb8da53-e17d-4c8d-a625-0d241d2caafd/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.644511 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f6b9e803-e21c-4609-ab6d-8e54e7cbe816" path="/var/lib/kubelet/pods/f6b9e803-e21c-4609-ab6d-8e54e7cbe816/volumes" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.654815 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" (UID: "f4fc3e1d-5fac-4696-a8eb-709db37b5ff6"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.657728 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.657841 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.657909 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.657967 5102 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.658019 5102 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.658110 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.658167 5102 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.658222 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.658281 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qkxwl\" (UniqueName: \"kubernetes.io/projected/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6-kube-api-access-qkxwl\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.684181 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage01-crc" (UniqueName: "kubernetes.io/local-volume/local-storage01-crc") on node "crc" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.741804 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.760331 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage01-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage01-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.861748 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.861869 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.861905 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.861947 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.862051 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.862100 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hd2vf\" (UniqueName: \"kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.862173 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.862233 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys\") pod \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\" (UID: \"8dfe2011-cf9e-413e-b53a-c7ff73f81161\") " Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.867796 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts" (OuterVolumeSpecName: "scripts") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "scripts". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.869115 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys" (OuterVolumeSpecName: "credential-keys") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "credential-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.869162 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys" (OuterVolumeSpecName: "fernet-keys") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "fernet-keys". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.870817 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf" (OuterVolumeSpecName: "kube-api-access-hd2vf") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "kube-api-access-hd2vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.894673 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data" (OuterVolumeSpecName: "config-data") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.902334 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.916127 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "internal-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.933133 5102 generic.go:334] "Generic (PLEG): container finished" podID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerID="32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f" exitCode=0 Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.933194 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-756757b6f5-klql8" event={"ID":"8dfe2011-cf9e-413e-b53a-c7ff73f81161","Type":"ContainerDied","Data":"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f"} Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.933221 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/keystone-756757b6f5-klql8" event={"ID":"8dfe2011-cf9e-413e-b53a-c7ff73f81161","Type":"ContainerDied","Data":"78f848b352e4057fe24fef652e86c0afbc064cf60ecb60cf471919c13cf57af0"} Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.933237 5102 scope.go:117] "RemoveContainer" containerID="32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.933297 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/keystone-756757b6f5-klql8" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.946651 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "8dfe2011-cf9e-413e-b53a-c7ff73f81161" (UID: "8dfe2011-cf9e-413e-b53a-c7ff73f81161"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.948400 5102 generic.go:334] "Generic (PLEG): container finished" podID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerID="5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec" exitCode=0 Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.948422 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerDied","Data":"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec"} Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.948952 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-server-0" event={"ID":"f4fc3e1d-5fac-4696-a8eb-709db37b5ff6","Type":"ContainerDied","Data":"751d5e8d8d75c2615358a9dfdc133b7afdcfdbfd251107083f8d63ea5c0c976d"} Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.948499 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/rabbitmq-server-0" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.953864 5102 generic.go:334] "Generic (PLEG): container finished" podID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerID="33b3d6f15adbf2ba58af4031167e04bf38158518432643d8d72a903641549c7a" exitCode=0 Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.953914 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerDied","Data":"33b3d6f15adbf2ba58af4031167e04bf38158518432643d8d72a903641549c7a"} Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966124 5102 reconciler_common.go:293] "Volume detached for volume \"credential-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-credential-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966146 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hd2vf\" (UniqueName: \"kubernetes.io/projected/8dfe2011-cf9e-413e-b53a-c7ff73f81161-kube-api-access-hd2vf\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966155 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966165 5102 reconciler_common.go:293] "Volume detached for volume \"fernet-keys\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-fernet-keys\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966173 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966181 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966189 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.966197 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/8dfe2011-cf9e-413e-b53a-c7ff73f81161-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:17 crc kubenswrapper[5102]: I0123 07:19:17.997251 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.004209 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-server-0"] Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.005966 5102 scope.go:117] "RemoveContainer" containerID="32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f" Jan 23 07:19:18 crc kubenswrapper[5102]: E0123 07:19:18.007582 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f\": container with ID starting with 
32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f not found: ID does not exist" containerID="32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.007632 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f"} err="failed to get container status \"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f\": rpc error: code = NotFound desc = could not find container \"32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f\": container with ID starting with 32293f694ee1b5afaf2ac47d34cdd0c0713ca4db8085c66bc8e28505c896299f not found: ID does not exist" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.007659 5102 scope.go:117] "RemoveContainer" containerID="5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.031851 5102 scope.go:117] "RemoveContainer" containerID="2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.061914 5102 scope.go:117] "RemoveContainer" containerID="5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec" Jan 23 07:19:18 crc kubenswrapper[5102]: E0123 07:19:18.062419 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec\": container with ID starting with 5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec not found: ID does not exist" containerID="5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.062488 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec"} err="failed to get container status \"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec\": rpc error: code = NotFound desc = could not find container \"5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec\": container with ID starting with 5a427fb25005ad6e8ba7b3634b76daba593e7cc09200afa43ff60d364b8e15ec not found: ID does not exist" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.062531 5102 scope.go:117] "RemoveContainer" containerID="2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5" Jan 23 07:19:18 crc kubenswrapper[5102]: E0123 07:19:18.064829 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5\": container with ID starting with 2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5 not found: ID does not exist" containerID="2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.064874 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5"} err="failed to get container status \"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5\": rpc error: code = NotFound desc = could not find container \"2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5\": container with ID starting with 
2c65825b8ad8b7cf31d166cab7a255e647aad826a8e98ebc53f3d9d9194b52a5 not found: ID does not exist" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.272616 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.277875 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/keystone-756757b6f5-klql8"] Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.412498 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481477 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481558 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481640 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"persistence\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481660 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4gbz9\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481679 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481731 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481803 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481827 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481846 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481862 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.481881 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins\") pod \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\" (UID: \"1ea732e7-d11d-4e12-9d44-f8fcafa50de5\") " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.484651 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins" (OuterVolumeSpecName: "rabbitmq-plugins") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "rabbitmq-plugins". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.484862 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf" (OuterVolumeSpecName: "plugins-conf") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "plugins-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.485424 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie" (OuterVolumeSpecName: "rabbitmq-erlang-cookie") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "rabbitmq-erlang-cookie". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.486114 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage07-crc" (OuterVolumeSpecName: "persistence") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "local-storage07-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.489824 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info" (OuterVolumeSpecName: "pod-info") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "pod-info". PluginName "kubernetes.io/downward-api", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.490113 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls" (OuterVolumeSpecName: "rabbitmq-tls") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "rabbitmq-tls". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.494668 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret" (OuterVolumeSpecName: "erlang-cookie-secret") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "erlang-cookie-secret". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.494875 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9" (OuterVolumeSpecName: "kube-api-access-4gbz9") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "kube-api-access-4gbz9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.505380 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data" (OuterVolumeSpecName: "config-data") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.531467 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf" (OuterVolumeSpecName: "server-conf") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "server-conf". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.582207 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd" (OuterVolumeSpecName: "rabbitmq-confd") pod "1ea732e7-d11d-4e12-9d44-f8fcafa50de5" (UID: "1ea732e7-d11d-4e12-9d44-f8fcafa50de5"). InnerVolumeSpecName "rabbitmq-confd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583131 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-erlang-cookie\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-erlang-cookie\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583167 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583179 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-tls\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-tls\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583191 5102 reconciler_common.go:293] "Volume detached for volume \"erlang-cookie-secret\" (UniqueName: \"kubernetes.io/secret/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-erlang-cookie-secret\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583202 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-plugins\" (UniqueName: \"kubernetes.io/empty-dir/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-plugins\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583213 5102 reconciler_common.go:293] "Volume detached for volume \"server-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-server-conf\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583224 5102 reconciler_common.go:293] "Volume detached for volume \"plugins-conf\" (UniqueName: \"kubernetes.io/configmap/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-plugins-conf\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583252 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" " Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583264 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4gbz9\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-kube-api-access-4gbz9\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583274 5102 reconciler_common.go:293] "Volume detached for volume \"rabbitmq-confd\" (UniqueName: \"kubernetes.io/projected/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-rabbitmq-confd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.583285 5102 reconciler_common.go:293] "Volume detached for volume \"pod-info\" (UniqueName: \"kubernetes.io/downward-api/1ea732e7-d11d-4e12-9d44-f8fcafa50de5-pod-info\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.606308 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage07-crc" (UniqueName: "kubernetes.io/local-volume/local-storage07-crc") on node "crc" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.684889 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage07-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage07-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.937969 5102 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.979137 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/rabbitmq-cell1-server-0" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.979403 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/rabbitmq-cell1-server-0" event={"ID":"1ea732e7-d11d-4e12-9d44-f8fcafa50de5","Type":"ContainerDied","Data":"a4627277241a6e04a36fd1dcf209a91d2523f1fbf8a1c0593cc639bf07bb470a"} Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.979449 5102 scope.go:117] "RemoveContainer" containerID="33b3d6f15adbf2ba58af4031167e04bf38158518432643d8d72a903641549c7a" Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.992657 5102 generic.go:334] "Generic (PLEG): container finished" podID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerID="fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb" exitCode=0 Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.992710 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerDied","Data":"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb"} Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.992742 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/cinder-scheduler-0" event={"ID":"4c08d6c5-8422-4da2-b8f3-2760dbebc521","Type":"ContainerDied","Data":"497109b2c03c3b0d9b1d174cd2e20c0402ffd455df531f23e9b86427e785e888"} Jan 23 07:19:18 crc kubenswrapper[5102]: I0123 07:19:18.992809 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/cinder-scheduler-0" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.007435 5102 scope.go:117] "RemoveContainer" containerID="dac1e501d0f018ad7a331fa5911c1ac5b2f12ea3b755131154923e975ef2f708" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.021869 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.036684 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/rabbitmq-cell1-server-0"] Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.045266 5102 scope.go:117] "RemoveContainer" containerID="d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.066121 5102 scope.go:117] "RemoveContainer" containerID="fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.082456 5102 scope.go:117] "RemoveContainer" containerID="d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44" Jan 23 07:19:19 crc kubenswrapper[5102]: E0123 07:19:19.082910 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44\": container with ID starting with d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44 not found: ID does not exist" containerID="d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.082953 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44"} 
err="failed to get container status \"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44\": rpc error: code = NotFound desc = could not find container \"d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44\": container with ID starting with d3c6e27ec48f9e56962bb92bd74fe83d4cf4e0500e8d22fc5ee36a4e22d5aa44 not found: ID does not exist" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.082983 5102 scope.go:117] "RemoveContainer" containerID="fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb" Jan 23 07:19:19 crc kubenswrapper[5102]: E0123 07:19:19.083393 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb\": container with ID starting with fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb not found: ID does not exist" containerID="fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.083428 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb"} err="failed to get container status \"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb\": rpc error: code = NotFound desc = could not find container \"fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb\": container with ID starting with fb334cc4d7a078c75ef2d2d8debe12339638ec76effd8f7a39b5cbcc1970d8eb not found: ID does not exist" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093668 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093780 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093817 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id" (OuterVolumeSpecName: "etc-machine-id") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "etc-machine-id". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093863 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093900 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093944 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2nc6p\" (UniqueName: \"kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.093997 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom\") pod \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\" (UID: \"4c08d6c5-8422-4da2-b8f3-2760dbebc521\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.094298 5102 reconciler_common.go:293] "Volume detached for volume \"etc-machine-id\" (UniqueName: \"kubernetes.io/host-path/4c08d6c5-8422-4da2-b8f3-2760dbebc521-etc-machine-id\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.097998 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom" (OuterVolumeSpecName: "config-data-custom") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "config-data-custom". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.098738 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts" (OuterVolumeSpecName: "scripts") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.099577 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p" (OuterVolumeSpecName: "kube-api-access-2nc6p") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "kube-api-access-2nc6p". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.131759 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "combined-ca-bundle". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.170698 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data" (OuterVolumeSpecName: "config-data") pod "4c08d6c5-8422-4da2-b8f3-2760dbebc521" (UID: "4c08d6c5-8422-4da2-b8f3-2760dbebc521"). InnerVolumeSpecName "config-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.195283 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2nc6p\" (UniqueName: \"kubernetes.io/projected/4c08d6c5-8422-4da2-b8f3-2760dbebc521-kube-api-access-2nc6p\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.195323 5102 reconciler_common.go:293] "Volume detached for volume \"config-data-custom\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data-custom\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.195335 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.195346 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.195356 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/4c08d6c5-8422-4da2-b8f3-2760dbebc521-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.349351 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.359311 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/cinder-scheduler-0"] Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.611503 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" path="/var/lib/kubelet/pods/1ea732e7-d11d-4e12-9d44-f8fcafa50de5/volumes" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.612399 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" path="/var/lib/kubelet/pods/4c08d6c5-8422-4da2-b8f3-2760dbebc521/volumes" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.613117 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" path="/var/lib/kubelet/pods/8dfe2011-cf9e-413e-b53a-c7ff73f81161/volumes" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.614814 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" path="/var/lib/kubelet/pods/f4fc3e1d-5fac-4696-a8eb-709db37b5ff6/volumes" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.623523 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703012 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703101 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xndbv\" (UniqueName: \"kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703155 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703193 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703224 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703252 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703304 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703347 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd\") pod \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\" (UID: \"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8\") " Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.703654 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd" (OuterVolumeSpecName: "run-httpd") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "run-httpd". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.704132 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd" (OuterVolumeSpecName: "log-httpd") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "log-httpd". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.707518 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv" (OuterVolumeSpecName: "kube-api-access-xndbv") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "kube-api-access-xndbv". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.711044 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts" (OuterVolumeSpecName: "scripts") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.726457 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml" (OuterVolumeSpecName: "sg-core-conf-yaml") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "sg-core-conf-yaml". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.768513 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs" (OuterVolumeSpecName: "ceilometer-tls-certs") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "ceilometer-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.770647 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.770952 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data" (OuterVolumeSpecName: "config-data") pod "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" (UID: "e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8"). InnerVolumeSpecName "config-data". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805101 5102 reconciler_common.go:293] "Volume detached for volume \"run-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-run-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805150 5102 reconciler_common.go:293] "Volume detached for volume \"sg-core-conf-yaml\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-sg-core-conf-yaml\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805176 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805195 5102 reconciler_common.go:293] "Volume detached for volume \"log-httpd\" (UniqueName: \"kubernetes.io/empty-dir/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-log-httpd\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805211 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805228 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xndbv\" (UniqueName: \"kubernetes.io/projected/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-kube-api-access-xndbv\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805244 5102 reconciler_common.go:293] "Volume detached for volume \"config-data\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-config-data\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:19 crc kubenswrapper[5102]: I0123 07:19:19.805259 5102 reconciler_common.go:293] "Volume detached for volume \"ceilometer-tls-certs\" (UniqueName: \"kubernetes.io/secret/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8-ceilometer-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.007802 5102 generic.go:334] "Generic (PLEG): container finished" podID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerID="440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222" exitCode=0 Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.007873 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/ceilometer-0" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.007896 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerDied","Data":"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222"} Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.009408 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ceilometer-0" event={"ID":"e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8","Type":"ContainerDied","Data":"c42a481bce5f05cab46712eff369c5a893db541a095b4db08fc1d5e3e516ab58"} Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.009441 5102 scope.go:117] "RemoveContainer" containerID="7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.065411 5102 scope.go:117] "RemoveContainer" containerID="d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.065620 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.071053 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ceilometer-0"] Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.088299 5102 scope.go:117] "RemoveContainer" containerID="440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.112923 5102 scope.go:117] "RemoveContainer" containerID="3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.134764 5102 scope.go:117] "RemoveContainer" containerID="7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.135259 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919\": container with ID starting with 7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919 not found: ID does not exist" containerID="7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.135300 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919"} err="failed to get container status \"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919\": rpc error: code = NotFound desc = could not find container \"7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919\": container with ID starting with 7e76545707e9863d9eaf8b921101ff1a8e1c5507cf567803ffb93a826e756919 not found: ID does not exist" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.135339 5102 scope.go:117] "RemoveContainer" containerID="d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.135672 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8\": container with ID starting with d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8 not found: ID does not exist" containerID="d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8" Jan 23 07:19:20 
crc kubenswrapper[5102]: I0123 07:19:20.135712 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8"} err="failed to get container status \"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8\": rpc error: code = NotFound desc = could not find container \"d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8\": container with ID starting with d68a88be2d7cf494e00ff77e605fe9c89c6dffa61086aef807b48043b70ab9b8 not found: ID does not exist" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.135754 5102 scope.go:117] "RemoveContainer" containerID="440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.136088 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222\": container with ID starting with 440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222 not found: ID does not exist" containerID="440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.136132 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222"} err="failed to get container status \"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222\": rpc error: code = NotFound desc = could not find container \"440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222\": container with ID starting with 440873b7ad1de31793ce1f7df54c3488b81e25b6900ea4b1aeb4b844ba19b222 not found: ID does not exist" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.136149 5102 scope.go:117] "RemoveContainer" containerID="3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.136524 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc\": container with ID starting with 3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc not found: ID does not exist" containerID="3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc" Jan 23 07:19:20 crc kubenswrapper[5102]: I0123 07:19:20.136617 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc"} err="failed to get container status \"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc\": rpc error: code = NotFound desc = could not find container \"3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc\": container with ID starting with 3dec540494435ccfc2f7bb44260a5edfd89aec347a72e7f53b8c19acfda965cc not found: ID does not exist" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.791406 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:20 crc 
kubenswrapper[5102]: E0123 07:19:20.792262 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.792334 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.793788 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.793846 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.794654 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.796139 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:20 crc kubenswrapper[5102]: E0123 07:19:20.796210 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:21 crc kubenswrapper[5102]: I0123 07:19:21.612885 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" path="/var/lib/kubelet/pods/e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8/volumes" Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.790232 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" 
containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.790781 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.791293 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.791364 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.791928 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.793321 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.794649 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:25 crc kubenswrapper[5102]: E0123 07:19:25.794691 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.790349 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" 
cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.791170 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.791484 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.791520 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.793347 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.795579 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.797502 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"] Jan 23 07:19:30 crc kubenswrapper[5102]: E0123 07:19:30.797586 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.728794 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-795454f649-697pp" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823494 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823603 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823725 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823789 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823845 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823896 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzkdq\" (UniqueName: \"kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.823956 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs\") pod \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\" (UID: \"76933dbd-cd7b-47f6-a8af-d216e0413bb7\") " Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.830846 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq" (OuterVolumeSpecName: "kube-api-access-lzkdq") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "kube-api-access-lzkdq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.831358 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config" (OuterVolumeSpecName: "httpd-config") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "httpd-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.882408 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs" (OuterVolumeSpecName: "internal-tls-certs") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "internal-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.883272 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config" (OuterVolumeSpecName: "config") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "config". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.884812 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.894335 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs" (OuterVolumeSpecName: "public-tls-certs") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "public-tls-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.904574 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs" (OuterVolumeSpecName: "ovndb-tls-certs") pod "76933dbd-cd7b-47f6-a8af-d216e0413bb7" (UID: "76933dbd-cd7b-47f6-a8af-d216e0413bb7"). InnerVolumeSpecName "ovndb-tls-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926274 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926326 5102 reconciler_common.go:293] "Volume detached for volume \"httpd-config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-httpd-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926347 5102 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-config\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926364 5102 reconciler_common.go:293] "Volume detached for volume \"ovndb-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-ovndb-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926383 5102 reconciler_common.go:293] "Volume detached for volume \"public-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-public-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926403 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzkdq\" (UniqueName: \"kubernetes.io/projected/76933dbd-cd7b-47f6-a8af-d216e0413bb7-kube-api-access-lzkdq\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:32 crc kubenswrapper[5102]: I0123 07:19:32.926424 5102 reconciler_common.go:293] "Volume detached for volume \"internal-tls-certs\" (UniqueName: \"kubernetes.io/secret/76933dbd-cd7b-47f6-a8af-d216e0413bb7-internal-tls-certs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.162363 5102 generic.go:334] "Generic (PLEG): container finished" podID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerID="a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba" exitCode=0 Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.162426 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerDied","Data":"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba"} Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.162478 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/neutron-795454f649-697pp" event={"ID":"76933dbd-cd7b-47f6-a8af-d216e0413bb7","Type":"ContainerDied","Data":"c7b283157687d9e0d560f59da8badfd97039798e26a22723de1962d1f09fe7f1"} Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.162592 5102 scope.go:117] "RemoveContainer" containerID="7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.162781 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/neutron-795454f649-697pp" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.200750 5102 scope.go:117] "RemoveContainer" containerID="a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.217584 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/neutron-795454f649-697pp"] Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.227763 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/neutron-795454f649-697pp"] Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.239679 5102 scope.go:117] "RemoveContainer" containerID="7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31" Jan 23 07:19:33 crc kubenswrapper[5102]: E0123 07:19:33.240230 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31\": container with ID starting with 7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31 not found: ID does not exist" containerID="7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.240276 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31"} err="failed to get container status \"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31\": rpc error: code = NotFound desc = could not find container \"7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31\": container with ID starting with 7009393feba7f32f10fe2ea3865bc24e9d1c7f908bff5a83c508644bae834f31 not found: ID does not exist" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.240311 5102 scope.go:117] "RemoveContainer" containerID="a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba" Jan 23 07:19:33 crc kubenswrapper[5102]: E0123 07:19:33.240672 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba\": container with ID starting with a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba not found: ID does not exist" containerID="a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.240710 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba"} err="failed to get container status \"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba\": rpc error: code = NotFound desc = could not find container \"a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba\": container with ID starting with a20c0af152df88404938f0ee0b42cc77a86cacbb215793b9681b7fc3ad53e8ba not found: ID does not exist" Jan 23 07:19:33 crc kubenswrapper[5102]: I0123 07:19:33.612832 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" path="/var/lib/kubelet/pods/76933dbd-cd7b-47f6-a8af-d216e0413bb7/volumes" Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.792343 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.793037 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.793171 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.793711 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" cmd=["/usr/local/bin/container-scripts/ovsdb_server_readiness.sh"]
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.793757 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a is running failed: container process not found" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server"
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.795689 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.797791 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" cmd=["/usr/local/bin/container-scripts/vswitchd_readiness.sh"]
Jan 23 07:19:35 crc kubenswrapper[5102]: E0123 07:19:35.798007 5102 prober.go:104] "Probe errored" err="rpc error: code = Unknown desc = command error: cannot register an exec PID: container is stopping, stdout: , stderr: , exit code -1" probeType="Readiness" pod="openstack/ovn-controller-ovs-h9gtx" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd"
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.828773 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-h9gtx_ac05d076-9929-479c-b5be-43eed0ee2dcc/ovs-vswitchd/0.log"
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.831283 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-h9gtx"
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944262 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944307 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944326 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944383 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-btb7h\" (UniqueName: \"kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944415 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log" (OuterVolumeSpecName: "var-log") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944417 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run" (OuterVolumeSpecName: "var-run") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "var-run". PluginName "kubernetes.io/host-path", VolumeGidValue ""
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944442 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944475 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs\") pod \"ac05d076-9929-479c-b5be-43eed0ee2dcc\" (UID: \"ac05d076-9929-479c-b5be-43eed0ee2dcc\") "
Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944550 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib" (OuterVolumeSpecName: "var-lib") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "var-lib". PluginName "kubernetes.io/host-path", VolumeGidValue ""
PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.944678 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs" (OuterVolumeSpecName: "etc-ovs") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "etc-ovs". PluginName "kubernetes.io/host-path", VolumeGidValue "" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.945099 5102 reconciler_common.go:293] "Volume detached for volume \"var-run\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-run\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.945114 5102 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-log\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.945122 5102 reconciler_common.go:293] "Volume detached for volume \"var-lib\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-var-lib\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.945130 5102 reconciler_common.go:293] "Volume detached for volume \"etc-ovs\" (UniqueName: \"kubernetes.io/host-path/ac05d076-9929-479c-b5be-43eed0ee2dcc-etc-ovs\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.945712 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts" (OuterVolumeSpecName: "scripts") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "scripts". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:19:38 crc kubenswrapper[5102]: I0123 07:19:38.954841 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h" (OuterVolumeSpecName: "kube-api-access-btb7h") pod "ac05d076-9929-479c-b5be-43eed0ee2dcc" (UID: "ac05d076-9929-479c-b5be-43eed0ee2dcc"). InnerVolumeSpecName "kube-api-access-btb7h". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.049609 5102 reconciler_common.go:293] "Volume detached for volume \"scripts\" (UniqueName: \"kubernetes.io/configmap/ac05d076-9929-479c-b5be-43eed0ee2dcc-scripts\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.049648 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-btb7h\" (UniqueName: \"kubernetes.io/projected/ac05d076-9929-479c-b5be-43eed0ee2dcc-kube-api-access-btb7h\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.242627 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack_ovn-controller-ovs-h9gtx_ac05d076-9929-479c-b5be-43eed0ee2dcc/ovs-vswitchd/0.log" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.243531 5102 generic.go:334] "Generic (PLEG): container finished" podID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" exitCode=137 Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.243602 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerDied","Data":"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31"} Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.243632 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/ovn-controller-ovs-h9gtx" event={"ID":"ac05d076-9929-479c-b5be-43eed0ee2dcc","Type":"ContainerDied","Data":"dee3cf005ffe767982b2518d6f239519e3e383a59fbddc7ef88bfb968bb3b207"} Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.243648 5102 scope.go:117] "RemoveContainer" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.243743 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/ovn-controller-ovs-h9gtx" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.248843 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.255410 5102 generic.go:334] "Generic (PLEG): container finished" podID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerID="f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e" exitCode=137 Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.255448 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e"} Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.255475 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openstack/swift-storage-0" event={"ID":"20474222-aadd-44c0-8c4e-f0b4bd0147c5","Type":"ContainerDied","Data":"0dfbd931491f7396bdacb10c215bb3018f09e51cf5fd12f72f6465cdd1e1e8f5"} Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.286032 5102 scope.go:117] "RemoveContainer" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.325755 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.345140 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovn-controller-ovs-h9gtx"] Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.352684 5102 scope.go:117] "RemoveContainer" containerID="ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.353288 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.354649 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache" (OuterVolumeSpecName: "cache") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.354774 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"swift\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.354919 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.355035 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.355077 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.355224 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5mfgc\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc\") pod \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\" (UID: \"20474222-aadd-44c0-8c4e-f0b4bd0147c5\") " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.355878 5102 reconciler_common.go:293] "Volume detached for volume \"cache\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-cache\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.356200 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock" (OuterVolumeSpecName: "lock") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "lock". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.360926 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/local-volume/local-storage05-crc" (OuterVolumeSpecName: "swift") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "local-storage05-crc". PluginName "kubernetes.io/local-volume", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.361603 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift" (OuterVolumeSpecName: "etc-swift") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "etc-swift". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.363393 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc" (OuterVolumeSpecName: "kube-api-access-5mfgc") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "kube-api-access-5mfgc". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.431905 5102 scope.go:117] "RemoveContainer" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.439887 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31\": container with ID starting with e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31 not found: ID does not exist" containerID="e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.439940 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31"} err="failed to get container status \"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31\": rpc error: code = NotFound desc = could not find container \"e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31\": container with ID starting with e3683a67b5467522ee57c8f83bd59dee4fd68c15ddb6e6bf8ef8d57ec97f2b31 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.439973 5102 scope.go:117] "RemoveContainer" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.441873 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a\": container with ID starting with 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a not found: ID does not exist" containerID="89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.441901 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a"} err="failed to get container status \"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a\": rpc error: code = NotFound desc = could not find container \"89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a\": container with ID starting with 89c8d6bb09d580fdd0e008e31325653d0bb1cb88731a5c1a2a726a993617230a not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.441920 5102 scope.go:117] "RemoveContainer" containerID="ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.442430 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d\": container with ID starting with ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d not found: ID does not 
exist" containerID="ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.442508 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d"} err="failed to get container status \"ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d\": rpc error: code = NotFound desc = could not find container \"ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d\": container with ID starting with ceac85427bf6a7204cb40fddaec7fb1e79755c3c11e4d6ca2fe6af265f13c67d not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.442528 5102 scope.go:117] "RemoveContainer" containerID="f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.457047 5102 reconciler_common.go:286] "operationExecutor.UnmountDevice started for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" " Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.457085 5102 reconciler_common.go:293] "Volume detached for volume \"lock\" (UniqueName: \"kubernetes.io/empty-dir/20474222-aadd-44c0-8c4e-f0b4bd0147c5-lock\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.457100 5102 reconciler_common.go:293] "Volume detached for volume \"etc-swift\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-etc-swift\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.457114 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5mfgc\" (UniqueName: \"kubernetes.io/projected/20474222-aadd-44c0-8c4e-f0b4bd0147c5-kube-api-access-5mfgc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.473475 5102 operation_generator.go:917] UnmountDevice succeeded for volume "local-storage05-crc" (UniqueName: "kubernetes.io/local-volume/local-storage05-crc") on node "crc" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.501491 5102 scope.go:117] "RemoveContainer" containerID="99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.521488 5102 scope.go:117] "RemoveContainer" containerID="1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.544895 5102 scope.go:117] "RemoveContainer" containerID="cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.558122 5102 reconciler_common.go:293] "Volume detached for volume \"local-storage05-crc\" (UniqueName: \"kubernetes.io/local-volume/local-storage05-crc\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.566972 5102 scope.go:117] "RemoveContainer" containerID="6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.590824 5102 scope.go:117] "RemoveContainer" containerID="67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.681871 5102 scope.go:117] "RemoveContainer" containerID="0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.683468 5102 kubelet_volumes.go:163] "Cleaned up 
orphaned pod volumes dir" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" path="/var/lib/kubelet/pods/ac05d076-9929-479c-b5be-43eed0ee2dcc/volumes" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.704883 5102 scope.go:117] "RemoveContainer" containerID="8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.726408 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle" (OuterVolumeSpecName: "combined-ca-bundle") pod "20474222-aadd-44c0-8c4e-f0b4bd0147c5" (UID: "20474222-aadd-44c0-8c4e-f0b4bd0147c5"). InnerVolumeSpecName "combined-ca-bundle". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.727345 5102 scope.go:117] "RemoveContainer" containerID="860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.745239 5102 scope.go:117] "RemoveContainer" containerID="e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.760725 5102 scope.go:117] "RemoveContainer" containerID="a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.776654 5102 reconciler_common.go:293] "Volume detached for volume \"combined-ca-bundle\" (UniqueName: \"kubernetes.io/secret/20474222-aadd-44c0-8c4e-f0b4bd0147c5-combined-ca-bundle\") on node \"crc\" DevicePath \"\"" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.779424 5102 scope.go:117] "RemoveContainer" containerID="fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.801089 5102 scope.go:117] "RemoveContainer" containerID="166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.819105 5102 scope.go:117] "RemoveContainer" containerID="9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.835609 5102 scope.go:117] "RemoveContainer" containerID="403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.853189 5102 scope.go:117] "RemoveContainer" containerID="f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.853708 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e\": container with ID starting with f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e not found: ID does not exist" containerID="f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.853746 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e"} err="failed to get container status \"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e\": rpc error: code = NotFound desc = could not find container \"f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e\": container with ID starting with f53333de112fa150aaa7ddd1e5d91a10300dc374a069a658ffc18340b709441e not found: ID does not exist" Jan 23 07:19:39 
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.854092 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8\": container with ID starting with 99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8 not found: ID does not exist" containerID="99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.854116 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8"} err="failed to get container status \"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8\": rpc error: code = NotFound desc = could not find container \"99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8\": container with ID starting with 99772ece1cacff896af37d3fcd7093d64e28273fd1fb11f856fcd46698c620a8 not found: ID does not exist"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.854144 5102 scope.go:117] "RemoveContainer" containerID="1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68"
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.854495 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68\": container with ID starting with 1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68 not found: ID does not exist" containerID="1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.854530 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68"} err="failed to get container status \"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68\": rpc error: code = NotFound desc = could not find container \"1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68\": container with ID starting with 1d8a5cc474a29036d55b2759f4a9daaf72f58eee8c253dd541775bd7c4930b68 not found: ID does not exist"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.854569 5102 scope.go:117] "RemoveContainer" containerID="cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332"
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.854996 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332\": container with ID starting with cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332 not found: ID does not exist" containerID="cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855036 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332"} err="failed to get container status \"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332\": rpc error: code = NotFound desc = could not find container \"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332\": container with ID starting with cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332 not found: ID does not exist"
\"cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332\": container with ID starting with cabddca811c7cb4e2a60a81b98a7482621e04ef2973dda1ef6b3dead94be3332 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855049 5102 scope.go:117] "RemoveContainer" containerID="6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.855308 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012\": container with ID starting with 6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012 not found: ID does not exist" containerID="6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855324 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012"} err="failed to get container status \"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012\": rpc error: code = NotFound desc = could not find container \"6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012\": container with ID starting with 6bc42c62c2afdbc50a26252d767daad61236c81290d7fb88974bd4959feaf012 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855335 5102 scope.go:117] "RemoveContainer" containerID="67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.855645 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4\": container with ID starting with 67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4 not found: ID does not exist" containerID="67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855670 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4"} err="failed to get container status \"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4\": rpc error: code = NotFound desc = could not find container \"67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4\": container with ID starting with 67341e8baac648bb74212ee09c72120e201da10e36b369827d32386e00fe56d4 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855686 5102 scope.go:117] "RemoveContainer" containerID="0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.855920 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8\": container with ID starting with 0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8 not found: ID does not exist" containerID="0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.855976 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8"} 
err="failed to get container status \"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8\": rpc error: code = NotFound desc = could not find container \"0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8\": container with ID starting with 0aa8a52c93048a4acc5f714016d81c618c90e8deafd99efee4c1c23b27f161a8 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856009 5102 scope.go:117] "RemoveContainer" containerID="8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.856302 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59\": container with ID starting with 8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59 not found: ID does not exist" containerID="8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856328 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59"} err="failed to get container status \"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59\": rpc error: code = NotFound desc = could not find container \"8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59\": container with ID starting with 8d571cbed6925864f2d0a8d2a90355e80d3ec1cd5e80d03e9a2a1116d9815b59 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856342 5102 scope.go:117] "RemoveContainer" containerID="860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.856592 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314\": container with ID starting with 860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314 not found: ID does not exist" containerID="860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856610 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314"} err="failed to get container status \"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314\": rpc error: code = NotFound desc = could not find container \"860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314\": container with ID starting with 860ee21ff4b6e2d2c978b0dcd3446b1b1d8f6675291984b3a5ce0986458e6314 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856621 5102 scope.go:117] "RemoveContainer" containerID="e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.856830 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb\": container with ID starting with e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb not found: ID does not exist" containerID="e6e07ada360b97677411726190a140c22d0eda9fc600686dfb8fecd9f4a66bfb" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856847 5102 
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.856858 5102 scope.go:117] "RemoveContainer" containerID="a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095"
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.857045 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095\": container with ID starting with a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095 not found: ID does not exist" containerID="a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857062 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095"} err="failed to get container status \"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095\": rpc error: code = NotFound desc = could not find container \"a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095\": container with ID starting with a4683572d0157d06a1eb7a29cb6bed6f9076bc27a9aa787b646debc742c56095 not found: ID does not exist"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857073 5102 scope.go:117] "RemoveContainer" containerID="fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe"
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.857273 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe\": container with ID starting with fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe not found: ID does not exist" containerID="fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857288 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe"} err="failed to get container status \"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe\": rpc error: code = NotFound desc = could not find container \"fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe\": container with ID starting with fe4c15d2f4114b1a8106f86a0419d956678e1d56111dc6890a474dd06f150afe not found: ID does not exist"
Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857301 5102 scope.go:117] "RemoveContainer" containerID="166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93"
Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.857485 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93\": container with ID starting with 166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93 not found: ID does not exist" containerID="166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93"
not exist" containerID="166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857500 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93"} err="failed to get container status \"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93\": rpc error: code = NotFound desc = could not find container \"166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93\": container with ID starting with 166297cbe1de0dc22da83034a37c93d96480991cafccc69a3ba1ae007d27bd93 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857514 5102 scope.go:117] "RemoveContainer" containerID="9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.857737 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5\": container with ID starting with 9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5 not found: ID does not exist" containerID="9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857756 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5"} err="failed to get container status \"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5\": rpc error: code = NotFound desc = could not find container \"9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5\": container with ID starting with 9da3eb3e1aa9d844f172dbe0f75e7cd3125bc051356794970e51d8f3d91fbed5 not found: ID does not exist" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857767 5102 scope.go:117] "RemoveContainer" containerID="403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13" Jan 23 07:19:39 crc kubenswrapper[5102]: E0123 07:19:39.857954 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13\": container with ID starting with 403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13 not found: ID does not exist" containerID="403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13" Jan 23 07:19:39 crc kubenswrapper[5102]: I0123 07:19:39.857982 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13"} err="failed to get container status \"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13\": rpc error: code = NotFound desc = could not find container \"403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13\": container with ID starting with 403fb0745d22366078757b785b98811998a5982e1bec59427a47cf56de385e13 not found: ID does not exist" Jan 23 07:19:40 crc kubenswrapper[5102]: I0123 07:19:40.270530 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openstack/swift-storage-0" Jan 23 07:19:40 crc kubenswrapper[5102]: I0123 07:19:40.351773 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-storage-0"] Jan 23 07:19:40 crc kubenswrapper[5102]: I0123 07:19:40.361043 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-storage-0"] Jan 23 07:19:41 crc kubenswrapper[5102]: I0123 07:19:41.614636 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" path="/var/lib/kubelet/pods/20474222-aadd-44c0-8c4e-f0b4bd0147c5/volumes" Jan 23 07:19:41 crc kubenswrapper[5102]: I0123 07:19:41.797427 5102 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod2f7956cc-1c1c-410f-94f8-86feb62d9124"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod2f7956cc-1c1c-410f-94f8-86feb62d9124] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2f7956cc_1c1c_410f_94f8_86feb62d9124.slice" Jan 23 07:19:41 crc kubenswrapper[5102]: E0123 07:19:41.797502 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort pod2f7956cc-1c1c-410f-94f8-86feb62d9124] : unable to destroy cgroup paths for cgroup [kubepods besteffort pod2f7956cc-1c1c-410f-94f8-86feb62d9124] : Timed out while waiting for systemd to remove kubepods-besteffort-pod2f7956cc_1c1c_410f_94f8_86feb62d9124.slice" pod="openstack/ovsdbserver-sb-0" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" Jan 23 07:19:42 crc kubenswrapper[5102]: I0123 07:19:42.291872 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openstack/ovsdbserver-sb-0" Jan 23 07:19:42 crc kubenswrapper[5102]: I0123 07:19:42.359580 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 07:19:42 crc kubenswrapper[5102]: I0123 07:19:42.368606 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/ovsdbserver-sb-0"] Jan 23 07:19:43 crc kubenswrapper[5102]: I0123 07:19:43.614709 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2f7956cc-1c1c-410f-94f8-86feb62d9124" path="/var/lib/kubelet/pods/2f7956cc-1c1c-410f-94f8-86feb62d9124/volumes" Jan 23 07:19:44 crc kubenswrapper[5102]: I0123 07:19:44.196940 5102 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","pod0e1fd671-9192-4406-b7ea-3a33b4cdec57"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort pod0e1fd671-9192-4406-b7ea-3a33b4cdec57] : Timed out while waiting for systemd to remove kubepods-besteffort-pod0e1fd671_9192_4406_b7ea_3a33b4cdec57.slice" Jan 23 07:19:45 crc kubenswrapper[5102]: I0123 07:19:45.082647 5102 pod_container_manager_linux.go:210] "Failed to delete cgroup paths" cgroupName=["kubepods","besteffort","podac268af7-b49d-40bf-97c8-7abc5ff2bdad"] err="unable to destroy cgroup paths for cgroup [kubepods besteffort podac268af7-b49d-40bf-97c8-7abc5ff2bdad] : Timed out while waiting for systemd to remove kubepods-besteffort-podac268af7_b49d_40bf_97c8_7abc5ff2bdad.slice" Jan 23 07:19:45 crc kubenswrapper[5102]: E0123 07:19:45.082739 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to delete cgroup paths for [kubepods besteffort podac268af7-b49d-40bf-97c8-7abc5ff2bdad] : unable to destroy cgroup paths for cgroup [kubepods besteffort podac268af7-b49d-40bf-97c8-7abc5ff2bdad] : Timed out while waiting for systemd to remove 
kubepods-besteffort-podac268af7_b49d_40bf_97c8_7abc5ff2bdad.slice" pod="openstack/swift-proxy-845d4fc79c-bhsj4" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" Jan 23 07:19:45 crc kubenswrapper[5102]: I0123 07:19:45.344024 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openstack/swift-proxy-845d4fc79c-bhsj4" Jan 23 07:19:45 crc kubenswrapper[5102]: I0123 07:19:45.373975 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"] Jan 23 07:19:45 crc kubenswrapper[5102]: I0123 07:19:45.379404 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openstack/swift-proxy-845d4fc79c-bhsj4"] Jan 23 07:19:45 crc kubenswrapper[5102]: I0123 07:19:45.606111 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac268af7-b49d-40bf-97c8-7abc5ff2bdad" path="/var/lib/kubelet/pods/ac268af7-b49d-40bf-97c8-7abc5ff2bdad/volumes" Jan 23 07:19:47 crc kubenswrapper[5102]: I0123 07:19:47.661279 5102 prober.go:107] "Probe failed" probeType="Readiness" pod="openstack/keystone-756757b6f5-klql8" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerName="keystone-api" probeResult="failure" output="Get \"https://10.217.0.156:5000/v3\": net/http: request canceled while waiting for connection (Client.Timeout exceeded while awaiting headers)" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.356511 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357461 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357481 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357497 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="mysql-bootstrap" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357508 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="mysql-bootstrap" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357532 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357568 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357588 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357598 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357613 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357623 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357644 5102 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357654 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357673 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357681 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357696 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerName="nova-cell0-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357707 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerName="nova-cell0-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357715 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" containerName="kube-state-metrics" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357724 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" containerName="kube-state-metrics" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357735 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357743 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357759 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357767 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357776 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357786 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357797 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="setup-container" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357806 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="setup-container" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357822 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357830 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: 
E0123 07:19:57.357845 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server-init" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357853 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server-init" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357867 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-reaper" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357875 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-reaper" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357891 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357898 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357907 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357915 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357927 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="sg-core" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357934 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="sg-core" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.357944 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.357951 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358014 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358021 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358036 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-expirer" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358043 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-expirer" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358061 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358073 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" 
containerName="barbican-keystone-listener" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358091 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358101 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358119 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358128 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358141 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358149 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358158 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="setup-container" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358165 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="setup-container" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358173 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358181 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358193 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358201 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358217 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358224 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358238 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358246 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358258 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358266 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-server" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358276 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358283 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358291 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerName="keystone-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358298 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerName="keystone-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358313 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="galera" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358322 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="galera" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358336 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358346 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358358 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358365 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-api" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358376 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358385 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358396 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-notification-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358404 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-notification-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358416 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" containerName="memcached" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358423 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" containerName="memcached" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358434 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="cinder-scheduler" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358441 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="cinder-scheduler" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358453 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358461 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358472 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c65ea3f-14be-4130-b116-2291c114323e" containerName="nova-cell1-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358479 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c65ea3f-14be-4130-b116-2291c114323e" containerName="nova-cell1-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358492 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="probe" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358500 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="probe" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358508 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358516 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-server" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358526 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358533 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-server" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358567 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-central-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358577 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-central-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358587 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="proxy-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358595 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="proxy-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358608 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="swift-recon-cron" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358615 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="swift-recon-cron" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358627 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358634 5102 
state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358654 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358662 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358671 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358679 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358690 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358698 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358711 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358718 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358732 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="rsync" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358739 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="rsync" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358747 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="openstack-network-exporter" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358756 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="openstack-network-exporter" Jan 23 07:19:57 crc kubenswrapper[5102]: E0123 07:19:57.358764 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358771 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358932 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-metadata" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358945 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358956 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="swift-recon-cron" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 
07:19:57.358969 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358981 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.358995 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ede537b-39d8-483c-9a2d-4ace36319060" containerName="galera" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359004 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359017 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="ovn-northd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359027 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-notification-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359037 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="47725711-7e88-4c25-8016-f70488231203" containerName="nova-metadata-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359049 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359058 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1893371f-b289-4336-a8ed-1bd78e9191b6" containerName="nova-cell0-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359070 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovs-vswitchd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359084 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359099 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c3459b4-efed-4868-8fd0-ffeb07f0100d" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359118 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359131 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359144 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359164 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="76933dbd-cd7b-47f6-a8af-d216e0413bb7" containerName="neutron-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359184 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359195 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359211 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359221 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359232 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="cinder-scheduler" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359243 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="302ce3d2-72f6-429c-b3cb-16e8fba0d04e" containerName="glance-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359257 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4fc3e1d-5fac-4696-a8eb-709db37b5ff6" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359266 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359274 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="ceilometer-central-agent" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359284 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359292 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-expirer" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359304 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="account-reaper" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359314 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359323 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1a44c7a2-d363-4438-b9db-ebd62b910427" containerName="openstack-network-exporter" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359335 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6abef536-ae8a-4a68-9c29-87a9af5aaee6" containerName="barbican-worker" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359346 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="proxy-httpd" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359354 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="35dab127-50f2-4f30-ba2f-68744d0a6ae8" containerName="placement-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359366 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="6c65ea3f-14be-4130-b116-2291c114323e" containerName="nova-cell1-conductor-conductor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359377 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ddb8da53-e17d-4c8d-a625-0d241d2caafd" containerName="kube-state-metrics" Jan 23 
07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359390 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="rsync" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359398 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e3bed276-ffe5-4d67-b34c-8e5a8b6c61f8" containerName="sg-core" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359409 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4c08d6c5-8422-4da2-b8f3-2760dbebc521" containerName="probe" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359422 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="bd9d4f50-cf1a-4235-8fc5-502b4a488cb8" containerName="cinder-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359433 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d0498339-2dc7-4527-8396-50bbd00b8443" containerName="memcached" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359442 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-updater" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359453 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="container-auditor" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359464 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="20474222-aadd-44c0-8c4e-f0b4bd0147c5" containerName="object-replicator" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359473 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ea732e7-d11d-4e12-9d44-f8fcafa50de5" containerName="rabbitmq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359485 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="84ff9e74-154d-4279-befe-109c03fb7c3b" containerName="barbican-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359496 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ac05d076-9929-479c-b5be-43eed0ee2dcc" containerName="ovsdb-server" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359506 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359518 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="8dfe2011-cf9e-413e-b53a-c7ff73f81161" containerName="keystone-api" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359530 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="57f488ce-4b72-40f4-82d8-ad074776c306" containerName="barbican-keystone-listener" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.359579 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="66d1a45d-2635-496c-92c1-86e3a686c5b8" containerName="nova-api-log" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.360897 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.379294 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.460462 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pbccb\" (UniqueName: \"kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.460586 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.460655 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.562383 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pbccb\" (UniqueName: \"kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.562456 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.562524 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.562979 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.563175 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.588882 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-pbccb\" (UniqueName: \"kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb\") pod \"certified-operators-5jfbq\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:57 crc kubenswrapper[5102]: I0123 07:19:57.690582 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:19:58 crc kubenswrapper[5102]: I0123 07:19:58.173112 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:19:58 crc kubenswrapper[5102]: I0123 07:19:58.508345 5102 generic.go:334] "Generic (PLEG): container finished" podID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerID="3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39" exitCode=0 Jan 23 07:19:58 crc kubenswrapper[5102]: I0123 07:19:58.508445 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerDied","Data":"3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39"} Jan 23 07:19:58 crc kubenswrapper[5102]: I0123 07:19:58.508692 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerStarted","Data":"73f29aa731f23055f974d8438a34f90d79f4e726d6f8dc43d807a5b9822cdbad"} Jan 23 07:19:59 crc kubenswrapper[5102]: I0123 07:19:59.519876 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerStarted","Data":"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca"} Jan 23 07:20:00 crc kubenswrapper[5102]: I0123 07:20:00.533061 5102 generic.go:334] "Generic (PLEG): container finished" podID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerID="8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca" exitCode=0 Jan 23 07:20:00 crc kubenswrapper[5102]: I0123 07:20:00.533128 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerDied","Data":"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca"} Jan 23 07:20:01 crc kubenswrapper[5102]: I0123 07:20:01.545446 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerStarted","Data":"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794"} Jan 23 07:20:01 crc kubenswrapper[5102]: I0123 07:20:01.573070 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-5jfbq" podStartSLOduration=1.988542567 podStartE2EDuration="4.57304886s" podCreationTimestamp="2026-01-23 07:19:57 +0000 UTC" firstStartedPulling="2026-01-23 07:19:58.511518285 +0000 UTC m=+1549.331867270" lastFinishedPulling="2026-01-23 07:20:01.096024588 +0000 UTC m=+1551.916373563" observedRunningTime="2026-01-23 07:20:01.56972091 +0000 UTC m=+1552.390069915" watchObservedRunningTime="2026-01-23 07:20:01.57304886 +0000 UTC m=+1552.393397855" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.159039 5102 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.162622 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.168249 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.294814 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.294927 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.295072 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wnk67\" (UniqueName: \"kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.396487 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.396611 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wnk67\" (UniqueName: \"kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.396691 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.397347 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.397362 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content\") pod \"community-operators-944bd\" (UID: 
\"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.423165 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wnk67\" (UniqueName: \"kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67\") pod \"community-operators-944bd\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:05 crc kubenswrapper[5102]: I0123 07:20:05.528439 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:06 crc kubenswrapper[5102]: I0123 07:20:06.061160 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:06 crc kubenswrapper[5102]: I0123 07:20:06.603883 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerStarted","Data":"8165354882e4ddfc576a62cdde1df7eb915e89f38b14adb3e959726aab6f13b9"} Jan 23 07:20:07 crc kubenswrapper[5102]: I0123 07:20:07.629816 5102 generic.go:334] "Generic (PLEG): container finished" podID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerID="734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664" exitCode=0 Jan 23 07:20:07 crc kubenswrapper[5102]: I0123 07:20:07.629888 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerDied","Data":"734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664"} Jan 23 07:20:07 crc kubenswrapper[5102]: I0123 07:20:07.691147 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:07 crc kubenswrapper[5102]: I0123 07:20:07.691218 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:07 crc kubenswrapper[5102]: I0123 07:20:07.766014 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:08 crc kubenswrapper[5102]: I0123 07:20:08.643889 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerStarted","Data":"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9"} Jan 23 07:20:08 crc kubenswrapper[5102]: I0123 07:20:08.716793 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:09 crc kubenswrapper[5102]: I0123 07:20:09.658276 5102 generic.go:334] "Generic (PLEG): container finished" podID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerID="8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9" exitCode=0 Jan 23 07:20:09 crc kubenswrapper[5102]: I0123 07:20:09.658419 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerDied","Data":"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9"} Jan 23 07:20:10 crc kubenswrapper[5102]: I0123 07:20:10.130359 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:20:10 crc kubenswrapper[5102]: I0123 07:20:10.672820 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-5jfbq" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="registry-server" containerID="cri-o://5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794" gracePeriod=2 Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.173105 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.281073 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pbccb\" (UniqueName: \"kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb\") pod \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.281137 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content\") pod \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.281291 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities\") pod \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\" (UID: \"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c\") " Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.284063 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities" (OuterVolumeSpecName: "utilities") pod "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" (UID: "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.299957 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb" (OuterVolumeSpecName: "kube-api-access-pbccb") pod "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" (UID: "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c"). InnerVolumeSpecName "kube-api-access-pbccb". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.336732 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" (UID: "bb7bb8c7-1c78-4f40-9707-9099ff0ce31c"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.383614 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.383665 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pbccb\" (UniqueName: \"kubernetes.io/projected/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-kube-api-access-pbccb\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.383682 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.688027 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerStarted","Data":"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b"} Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.695194 5102 generic.go:334] "Generic (PLEG): container finished" podID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerID="5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794" exitCode=0 Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.695232 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerDied","Data":"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794"} Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.695256 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-5jfbq" event={"ID":"bb7bb8c7-1c78-4f40-9707-9099ff0ce31c","Type":"ContainerDied","Data":"73f29aa731f23055f974d8438a34f90d79f4e726d6f8dc43d807a5b9822cdbad"} Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.695275 5102 scope.go:117] "RemoveContainer" containerID="5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.695445 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-5jfbq" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.714345 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-944bd" podStartSLOduration=3.838701678 podStartE2EDuration="6.714325985s" podCreationTimestamp="2026-01-23 07:20:05 +0000 UTC" firstStartedPulling="2026-01-23 07:20:07.632335927 +0000 UTC m=+1558.452684942" lastFinishedPulling="2026-01-23 07:20:10.507960264 +0000 UTC m=+1561.328309249" observedRunningTime="2026-01-23 07:20:11.710828 +0000 UTC m=+1562.531176975" watchObservedRunningTime="2026-01-23 07:20:11.714325985 +0000 UTC m=+1562.534674960" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.727624 5102 scope.go:117] "RemoveContainer" containerID="8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.751698 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.751756 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-5jfbq"] Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.789277 5102 scope.go:117] "RemoveContainer" containerID="3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.818571 5102 scope.go:117] "RemoveContainer" containerID="5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794" Jan 23 07:20:11 crc kubenswrapper[5102]: E0123 07:20:11.819074 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794\": container with ID starting with 5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794 not found: ID does not exist" containerID="5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.819147 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794"} err="failed to get container status \"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794\": rpc error: code = NotFound desc = could not find container \"5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794\": container with ID starting with 5b5bd3fa73cbf1e4f9e8383a2077325d5755fdf3dcef337da67002325383e794 not found: ID does not exist" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.819175 5102 scope.go:117] "RemoveContainer" containerID="8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca" Jan 23 07:20:11 crc kubenswrapper[5102]: E0123 07:20:11.819460 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca\": container with ID starting with 8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca not found: ID does not exist" containerID="8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.819512 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca"} err="failed to get 
container status \"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca\": rpc error: code = NotFound desc = could not find container \"8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca\": container with ID starting with 8680662655f6f3876c3f9dac839c4e53ce747c9d62ea0b86c505cd0d403052ca not found: ID does not exist" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.819538 5102 scope.go:117] "RemoveContainer" containerID="3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39" Jan 23 07:20:11 crc kubenswrapper[5102]: E0123 07:20:11.819816 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39\": container with ID starting with 3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39 not found: ID does not exist" containerID="3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39" Jan 23 07:20:11 crc kubenswrapper[5102]: I0123 07:20:11.819861 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39"} err="failed to get container status \"3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39\": rpc error: code = NotFound desc = could not find container \"3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39\": container with ID starting with 3310a7e9a66b052b00bbf049950db49ef1223a9d28db8df219402ff5378d2d39 not found: ID does not exist" Jan 23 07:20:13 crc kubenswrapper[5102]: I0123 07:20:13.616501 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" path="/var/lib/kubelet/pods/bb7bb8c7-1c78-4f40-9707-9099ff0ce31c/volumes" Jan 23 07:20:15 crc kubenswrapper[5102]: I0123 07:20:15.529603 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:15 crc kubenswrapper[5102]: I0123 07:20:15.530102 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:15 crc kubenswrapper[5102]: I0123 07:20:15.617903 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:15 crc kubenswrapper[5102]: I0123 07:20:15.773773 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:16 crc kubenswrapper[5102]: I0123 07:20:16.107846 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:17 crc kubenswrapper[5102]: I0123 07:20:17.750925 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-944bd" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="registry-server" containerID="cri-o://34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b" gracePeriod=2 Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.747003 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.761357 5102 generic.go:334] "Generic (PLEG): container finished" podID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerID="34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b" exitCode=0 Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.761411 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerDied","Data":"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b"} Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.761447 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-944bd" event={"ID":"c1c43a27-01ef-4a89-b21b-1e513540d336","Type":"ContainerDied","Data":"8165354882e4ddfc576a62cdde1df7eb915e89f38b14adb3e959726aab6f13b9"} Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.761474 5102 scope.go:117] "RemoveContainer" containerID="34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.761659 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-944bd" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.800322 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wnk67\" (UniqueName: \"kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67\") pod \"c1c43a27-01ef-4a89-b21b-1e513540d336\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.800449 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content\") pod \"c1c43a27-01ef-4a89-b21b-1e513540d336\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.800590 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities\") pod \"c1c43a27-01ef-4a89-b21b-1e513540d336\" (UID: \"c1c43a27-01ef-4a89-b21b-1e513540d336\") " Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.802280 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities" (OuterVolumeSpecName: "utilities") pod "c1c43a27-01ef-4a89-b21b-1e513540d336" (UID: "c1c43a27-01ef-4a89-b21b-1e513540d336"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.804639 5102 scope.go:117] "RemoveContainer" containerID="8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.812897 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67" (OuterVolumeSpecName: "kube-api-access-wnk67") pod "c1c43a27-01ef-4a89-b21b-1e513540d336" (UID: "c1c43a27-01ef-4a89-b21b-1e513540d336"). InnerVolumeSpecName "kube-api-access-wnk67". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.854078 5102 scope.go:117] "RemoveContainer" containerID="734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.881056 5102 scope.go:117] "RemoveContainer" containerID="34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b" Jan 23 07:20:18 crc kubenswrapper[5102]: E0123 07:20:18.881673 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b\": container with ID starting with 34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b not found: ID does not exist" containerID="34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.881729 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b"} err="failed to get container status \"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b\": rpc error: code = NotFound desc = could not find container \"34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b\": container with ID starting with 34fff260a0bc734b5d4e3c4358a339f028635e755560af888d5972825736f35b not found: ID does not exist" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.881763 5102 scope.go:117] "RemoveContainer" containerID="8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9" Jan 23 07:20:18 crc kubenswrapper[5102]: E0123 07:20:18.882209 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9\": container with ID starting with 8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9 not found: ID does not exist" containerID="8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.882247 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9"} err="failed to get container status \"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9\": rpc error: code = NotFound desc = could not find container \"8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9\": container with ID starting with 8d9433552211033985285428ddbd3e16e6c25f85f3f4f4f4dd5b0002eaaffdd9 not found: ID does not exist" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.882272 5102 scope.go:117] "RemoveContainer" containerID="734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664" Jan 23 07:20:18 crc kubenswrapper[5102]: E0123 07:20:18.882631 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664\": container with ID starting with 734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664 not found: ID does not exist" containerID="734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.882666 5102 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664"} err="failed to get container status \"734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664\": rpc error: code = NotFound desc = could not find container \"734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664\": container with ID starting with 734380ac0d2d0d64e8176b1b9e670738006ffdb6b1672bed190f960e848c9664 not found: ID does not exist" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.896487 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "c1c43a27-01ef-4a89-b21b-1e513540d336" (UID: "c1c43a27-01ef-4a89-b21b-1e513540d336"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.902278 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.902321 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/c1c43a27-01ef-4a89-b21b-1e513540d336-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:18 crc kubenswrapper[5102]: I0123 07:20:18.902336 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wnk67\" (UniqueName: \"kubernetes.io/projected/c1c43a27-01ef-4a89-b21b-1e513540d336-kube-api-access-wnk67\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:19 crc kubenswrapper[5102]: I0123 07:20:19.104363 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:19 crc kubenswrapper[5102]: I0123 07:20:19.118975 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-944bd"] Jan 23 07:20:19 crc kubenswrapper[5102]: I0123 07:20:19.614135 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" path="/var/lib/kubelet/pods/c1c43a27-01ef-4a89-b21b-1e513540d336/volumes" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.948977 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950076 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="extract-utilities" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950093 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="extract-utilities" Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950118 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="extract-content" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950126 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="extract-content" Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950143 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950168 5102 
state_mem.go:107] "Deleted CPUSet assignment" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950177 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="extract-utilities" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950185 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="extract-utilities" Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950194 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950201 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: E0123 07:20:30.950215 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="extract-content" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950223 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="extract-content" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950406 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="bb7bb8c7-1c78-4f40-9707-9099ff0ce31c" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.950434 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="c1c43a27-01ef-4a89-b21b-1e513540d336" containerName="registry-server" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.951655 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:30 crc kubenswrapper[5102]: I0123 07:20:30.970746 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.096997 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.097072 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-l29g5\" (UniqueName: \"kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.097225 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.198437 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.198531 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.198590 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-l29g5\" (UniqueName: \"kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.199068 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.199137 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.216882 5102 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-l29g5\" (UniqueName: \"kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5\") pod \"redhat-marketplace-7sffm\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.271889 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.705929 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:31 crc kubenswrapper[5102]: W0123 07:20:31.714523 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod14db9f8a_a648_4b9e_a845_639556a9f476.slice/crio-64020c1217eb5dcfb415a404fd2d1d134ec941c521672edd69a8a9bfe2fe0996 WatchSource:0}: Error finding container 64020c1217eb5dcfb415a404fd2d1d134ec941c521672edd69a8a9bfe2fe0996: Status 404 returned error can't find the container with id 64020c1217eb5dcfb415a404fd2d1d134ec941c521672edd69a8a9bfe2fe0996 Jan 23 07:20:31 crc kubenswrapper[5102]: I0123 07:20:31.904748 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerStarted","Data":"64020c1217eb5dcfb415a404fd2d1d134ec941c521672edd69a8a9bfe2fe0996"} Jan 23 07:20:32 crc kubenswrapper[5102]: I0123 07:20:32.916089 5102 generic.go:334] "Generic (PLEG): container finished" podID="14db9f8a-a648-4b9e-a845-639556a9f476" containerID="5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0" exitCode=0 Jan 23 07:20:32 crc kubenswrapper[5102]: I0123 07:20:32.916141 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerDied","Data":"5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0"} Jan 23 07:20:33 crc kubenswrapper[5102]: I0123 07:20:33.925863 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerStarted","Data":"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6"} Jan 23 07:20:34 crc kubenswrapper[5102]: I0123 07:20:34.941153 5102 generic.go:334] "Generic (PLEG): container finished" podID="14db9f8a-a648-4b9e-a845-639556a9f476" containerID="64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6" exitCode=0 Jan 23 07:20:34 crc kubenswrapper[5102]: I0123 07:20:34.941219 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerDied","Data":"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6"} Jan 23 07:20:35 crc kubenswrapper[5102]: I0123 07:20:35.955950 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerStarted","Data":"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643"} Jan 23 07:20:35 crc kubenswrapper[5102]: I0123 07:20:35.978526 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-7sffm" podStartSLOduration=3.358910706 
podStartE2EDuration="5.978499826s" podCreationTimestamp="2026-01-23 07:20:30 +0000 UTC" firstStartedPulling="2026-01-23 07:20:32.91855651 +0000 UTC m=+1583.738905485" lastFinishedPulling="2026-01-23 07:20:35.53814559 +0000 UTC m=+1586.358494605" observedRunningTime="2026-01-23 07:20:35.973336183 +0000 UTC m=+1586.793685168" watchObservedRunningTime="2026-01-23 07:20:35.978499826 +0000 UTC m=+1586.798848821" Jan 23 07:20:41 crc kubenswrapper[5102]: I0123 07:20:41.272024 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:41 crc kubenswrapper[5102]: I0123 07:20:41.272358 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:41 crc kubenswrapper[5102]: I0123 07:20:41.342122 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:42 crc kubenswrapper[5102]: I0123 07:20:42.067464 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:42 crc kubenswrapper[5102]: I0123 07:20:42.125601 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.038974 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-7sffm" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="registry-server" containerID="cri-o://414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643" gracePeriod=2 Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.428497 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.524356 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-l29g5\" (UniqueName: \"kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5\") pod \"14db9f8a-a648-4b9e-a845-639556a9f476\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.524423 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content\") pod \"14db9f8a-a648-4b9e-a845-639556a9f476\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.524554 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities\") pod \"14db9f8a-a648-4b9e-a845-639556a9f476\" (UID: \"14db9f8a-a648-4b9e-a845-639556a9f476\") " Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.526207 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities" (OuterVolumeSpecName: "utilities") pod "14db9f8a-a648-4b9e-a845-639556a9f476" (UID: "14db9f8a-a648-4b9e-a845-639556a9f476"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.536863 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5" (OuterVolumeSpecName: "kube-api-access-l29g5") pod "14db9f8a-a648-4b9e-a845-639556a9f476" (UID: "14db9f8a-a648-4b9e-a845-639556a9f476"). InnerVolumeSpecName "kube-api-access-l29g5". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.564777 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14db9f8a-a648-4b9e-a845-639556a9f476" (UID: "14db9f8a-a648-4b9e-a845-639556a9f476"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.626817 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.626878 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-l29g5\" (UniqueName: \"kubernetes.io/projected/14db9f8a-a648-4b9e-a845-639556a9f476-kube-api-access-l29g5\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:44 crc kubenswrapper[5102]: I0123 07:20:44.626903 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14db9f8a-a648-4b9e-a845-639556a9f476-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.056843 5102 generic.go:334] "Generic (PLEG): container finished" podID="14db9f8a-a648-4b9e-a845-639556a9f476" containerID="414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643" exitCode=0 Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.056917 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerDied","Data":"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643"} Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.057003 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-7sffm" event={"ID":"14db9f8a-a648-4b9e-a845-639556a9f476","Type":"ContainerDied","Data":"64020c1217eb5dcfb415a404fd2d1d134ec941c521672edd69a8a9bfe2fe0996"} Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.057037 5102 scope.go:117] "RemoveContainer" containerID="414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.056934 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-7sffm" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.094495 5102 scope.go:117] "RemoveContainer" containerID="64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.121112 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.132909 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-7sffm"] Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.139504 5102 scope.go:117] "RemoveContainer" containerID="5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.166603 5102 scope.go:117] "RemoveContainer" containerID="414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643" Jan 23 07:20:45 crc kubenswrapper[5102]: E0123 07:20:45.167057 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643\": container with ID starting with 414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643 not found: ID does not exist" containerID="414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.167099 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643"} err="failed to get container status \"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643\": rpc error: code = NotFound desc = could not find container \"414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643\": container with ID starting with 414dab567f4cbba0d2ac3cc2314c9fb428abac7a4a9d9757227729c8aa3f6643 not found: ID does not exist" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.167134 5102 scope.go:117] "RemoveContainer" containerID="64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6" Jan 23 07:20:45 crc kubenswrapper[5102]: E0123 07:20:45.167499 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6\": container with ID starting with 64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6 not found: ID does not exist" containerID="64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.167530 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6"} err="failed to get container status \"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6\": rpc error: code = NotFound desc = could not find container \"64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6\": container with ID starting with 64774a5d68c8385eb4a45adac582befecad38e047fd40c5b35fe20edf192c0b6 not found: ID does not exist" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.167563 5102 scope.go:117] "RemoveContainer" containerID="5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0" Jan 23 07:20:45 crc kubenswrapper[5102]: E0123 07:20:45.167785 5102 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0\": container with ID starting with 5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0 not found: ID does not exist" containerID="5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.167811 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0"} err="failed to get container status \"5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0\": rpc error: code = NotFound desc = could not find container \"5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0\": container with ID starting with 5d4da26bb660c027b2d50cfde8b66aca16fbac4d53ac23eb85f61848aa4f6fa0 not found: ID does not exist" Jan 23 07:20:45 crc kubenswrapper[5102]: I0123 07:20:45.609989 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" path="/var/lib/kubelet/pods/14db9f8a-a648-4b9e-a845-639556a9f476/volumes" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.612705 5102 scope.go:117] "RemoveContainer" containerID="4d42dba66296dcae374056c93eefa27df0374a17bdd663a51a98a8779a3b1ae0" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.674686 5102 scope.go:117] "RemoveContainer" containerID="23be25efbbbb49cf5b313ae643ee992afe2cc9a873f12cb787a10ffbaa4b1f4e" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.711671 5102 scope.go:117] "RemoveContainer" containerID="972f9a84e458c913ea4e1bc1b2c2dfc03afd961f0eabaeb65d9865461e35e88c" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.741489 5102 scope.go:117] "RemoveContainer" containerID="a2c0fa2bd612a343ce6b3fcfef0d06e46c76af03843c6c3a225b21e106594666" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.785636 5102 scope.go:117] "RemoveContainer" containerID="6e2bf46f7c946934a80177b09a7d6e8d66e8926c7025dcda6c07822a13e75707" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.811893 5102 scope.go:117] "RemoveContainer" containerID="ed0bb614f41e42220655c7ef417f5313bc2d9845fc86d13a8a928923ae9b75a7" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.846785 5102 scope.go:117] "RemoveContainer" containerID="4d9a56b175cc4185c212114ef997d2459b218cdbc2075a01ce27a964d06ddb8e" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.867806 5102 scope.go:117] "RemoveContainer" containerID="32798ef7ae044e71d1056dc26d7107caa27657b6f9372f44acb759953237e2d4" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.890621 5102 scope.go:117] "RemoveContainer" containerID="0296f13483bdd5b016788ffbae8298e92f4031ce3d313e2b7027e65f01e58acd" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.912386 5102 scope.go:117] "RemoveContainer" containerID="0910323f4efad34b947cd9ea3328e56c11965b6cf6e98f6586c7a0af4f002f66" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.930805 5102 scope.go:117] "RemoveContainer" containerID="6d46e8fc78e649e7afb857616146a41ab67408309a95afe99332c144e8fb1f6f" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.948698 5102 scope.go:117] "RemoveContainer" containerID="c6b2bc5b237e5309ec098641c4039d7dd50f686b87ac28ddae325a217c728b3f" Jan 23 07:21:12 crc kubenswrapper[5102]: I0123 07:21:12.966588 5102 scope.go:117] "RemoveContainer" containerID="17ec3f7e6cb9c3d8a6294e7054c5af457a6f4c6a3ebb59c1056f1d6e8a6ba5c5" Jan 23 07:21:12 crc 
kubenswrapper[5102]: I0123 07:21:12.987483 5102 scope.go:117] "RemoveContainer" containerID="d8ae3da02197050db71977dd19c8a0685c7287d6b6e0cba5a68823e64d602cd4" Jan 23 07:21:13 crc kubenswrapper[5102]: I0123 07:21:13.011449 5102 scope.go:117] "RemoveContainer" containerID="decc8d23997068046d25aae949afcb423ad378742100603636a84a6e4ce1c056" Jan 23 07:21:13 crc kubenswrapper[5102]: I0123 07:21:13.032664 5102 scope.go:117] "RemoveContainer" containerID="53a6ede6ca48bea0ecdc5fb2b5b3c71102206283a4315a7b0bf011fec4695cfb" Jan 23 07:21:13 crc kubenswrapper[5102]: I0123 07:21:13.068129 5102 scope.go:117] "RemoveContainer" containerID="eaf789feee4610f2cd2ae2c77e24ab5397a84a378cbf76fa694ebd576b9d3c39" Jan 23 07:21:13 crc kubenswrapper[5102]: I0123 07:21:13.096179 5102 scope.go:117] "RemoveContainer" containerID="e6c32afee6c95235234ebc27aeba6538794a2c26897e18df4ae434341472026c" Jan 23 07:21:13 crc kubenswrapper[5102]: I0123 07:21:13.125259 5102 scope.go:117] "RemoveContainer" containerID="089b7420219fcf0447d5dd76ce8a11dd0f0ad218a2cc2cfdd4db246cd9bf5143" Jan 23 07:21:16 crc kubenswrapper[5102]: I0123 07:21:16.768297 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:21:16 crc kubenswrapper[5102]: I0123 07:21:16.768708 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:21:46 crc kubenswrapper[5102]: I0123 07:21:46.769186 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:21:46 crc kubenswrapper[5102]: I0123 07:21:46.770172 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.535534 5102 scope.go:117] "RemoveContainer" containerID="a4b253153a5b0ae4b7304fc69166a78bdc78f9b33184fefd123a47d6a29e02a7" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.570656 5102 scope.go:117] "RemoveContainer" containerID="5078354f12a1faec162881f83a1d700354f0688d65cca708e2c62816bdad5e58" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.634879 5102 scope.go:117] "RemoveContainer" containerID="04a75c697ea1aac00dffbc51b878b9c90262d7c394882f3e8e4fead3dde40397" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.667361 5102 scope.go:117] "RemoveContainer" containerID="fab6d58a2b964d0a2c6ee9afb772804f0d0c70b36c9b83bbb60b32735ec7b64d" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.742871 5102 scope.go:117] "RemoveContainer" containerID="d62cf3f61ec961d54c0543a7c6db6538a2fa229a7aa3236626738a9910298f8a" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.780759 5102 scope.go:117] "RemoveContainer" 
containerID="38ba8da046ac14dc360b77fb7112dee42133d1a68989da117321421af10dcea2" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.814239 5102 scope.go:117] "RemoveContainer" containerID="72ddf8ff49e5a3193c48bebbeb2acb4f025af8aea7f170ec0287cc32ad8e0c2e" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.861513 5102 scope.go:117] "RemoveContainer" containerID="b5931e20a6ae974a9df4e142bed61f7199f857b21d6f031cee275cb71eed9329" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.930941 5102 scope.go:117] "RemoveContainer" containerID="1088d9848b17f47b3d88ce07e62e66b5442e4c6d98cebfd472e902a697152235" Jan 23 07:22:13 crc kubenswrapper[5102]: I0123 07:22:13.960721 5102 scope.go:117] "RemoveContainer" containerID="350918eff16d686330206ab3a6e27eb47607f9378ed6d5a60f0567ef662eb7e0" Jan 23 07:22:16 crc kubenswrapper[5102]: I0123 07:22:16.768794 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:22:16 crc kubenswrapper[5102]: I0123 07:22:16.769093 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:22:16 crc kubenswrapper[5102]: I0123 07:22:16.769144 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:22:16 crc kubenswrapper[5102]: I0123 07:22:16.769872 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:22:16 crc kubenswrapper[5102]: I0123 07:22:16.769948 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" gracePeriod=600 Jan 23 07:22:16 crc kubenswrapper[5102]: E0123 07:22:16.904851 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:22:17 crc kubenswrapper[5102]: I0123 07:22:17.099084 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" exitCode=0 Jan 23 07:22:17 crc kubenswrapper[5102]: I0123 07:22:17.099153 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458"} Jan 23 07:22:17 crc kubenswrapper[5102]: I0123 07:22:17.099825 5102 scope.go:117] "RemoveContainer" containerID="cdb7d8ca938d540b4197d3f803c2d9db00f127837a56b64d7ab62a996be59a8b" Jan 23 07:22:17 crc kubenswrapper[5102]: I0123 07:22:17.100645 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:22:17 crc kubenswrapper[5102]: E0123 07:22:17.101005 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:22:29 crc kubenswrapper[5102]: I0123 07:22:29.609826 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:22:29 crc kubenswrapper[5102]: E0123 07:22:29.611240 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:22:44 crc kubenswrapper[5102]: I0123 07:22:44.597908 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:22:44 crc kubenswrapper[5102]: E0123 07:22:44.598928 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:22:56 crc kubenswrapper[5102]: I0123 07:22:56.598147 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:22:56 crc kubenswrapper[5102]: E0123 07:22:56.599451 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:23:09 crc kubenswrapper[5102]: I0123 07:23:09.640161 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:23:09 crc kubenswrapper[5102]: E0123 07:23:09.641411 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.192242 5102 scope.go:117] "RemoveContainer" containerID="da75066d37eacfdc50001d433632728dac882b39846bcaa1807e9ee3518a7bde" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.265898 5102 scope.go:117] "RemoveContainer" containerID="ffc580056cbc54142fc39afab9eb0c2dcaaec5bbb399deaf0929c56ddb885f2f" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.292931 5102 scope.go:117] "RemoveContainer" containerID="56868845e6ca22358664384702fa4ce1246131afbda19ddf10daab40d90a98ba" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.327695 5102 scope.go:117] "RemoveContainer" containerID="4f979b76f22ef2e8f8509c19caa21930b8a908a0e7b25aba0b15129e8e286021" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.360872 5102 scope.go:117] "RemoveContainer" containerID="d16b0a4419002db2415cab085fc8a5390ea935e4fea5424b97b0f8ead9c68fef" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.394488 5102 scope.go:117] "RemoveContainer" containerID="37f4fbbf6a9f611f9265e2e7de99da29e4ce4bdea696864bf8cb79bb4d5056d4" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.425817 5102 scope.go:117] "RemoveContainer" containerID="edb6be1ebb76f4f9ff7af6fb07ea4a47e81a16d609e59271fddd77fb1ec66f5f" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.470672 5102 scope.go:117] "RemoveContainer" containerID="c91a8e896c4e37680e0983957da81be367f9c32a63a4da75e84a99d0e3b57364" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.505496 5102 scope.go:117] "RemoveContainer" containerID="773c01238764a0ed41158b255aa3c581891c36cf9b44486fe6ee3bcd08906b5d" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.560093 5102 scope.go:117] "RemoveContainer" containerID="107c5f3d9db926ad82c1955c3fd0cba07ea73b8de197843699ad0edcedf0354b" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.594087 5102 scope.go:117] "RemoveContainer" containerID="db0b5adeb2a555272b671c5a064386733799331b44521a212eb8c2b6d8db207a" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.623511 5102 scope.go:117] "RemoveContainer" containerID="868d87cea06d5b9482b8147a33f18e2828a731b3e1fb46272675463a760abf4f" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.648737 5102 scope.go:117] "RemoveContainer" containerID="80130ba290cd20c0ccdae1ffb0c1dcbd426c8d074791abc7cf555bdeaa367da6" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.678445 5102 scope.go:117] "RemoveContainer" containerID="82f639b8e1751ac8d69a9f889bd3bf8fd351aaa0b6dabbd38eb25e15eb84da0a" Jan 23 07:23:14 crc kubenswrapper[5102]: I0123 07:23:14.711120 5102 scope.go:117] "RemoveContainer" containerID="4075f0a8cfcd82513e6274dfeabdd2f8728604846eb7a3d2e3942e6e90d51c92" Jan 23 07:23:24 crc kubenswrapper[5102]: I0123 07:23:24.598904 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:23:24 crc kubenswrapper[5102]: E0123 07:23:24.599964 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:23:38 crc kubenswrapper[5102]: I0123 07:23:38.598695 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:23:38 crc kubenswrapper[5102]: E0123 07:23:38.599984 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:23:50 crc kubenswrapper[5102]: I0123 07:23:50.597960 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:23:50 crc kubenswrapper[5102]: E0123 07:23:50.601279 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:24:01 crc kubenswrapper[5102]: I0123 07:24:01.598993 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:24:01 crc kubenswrapper[5102]: E0123 07:24:01.601639 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:24:14 crc kubenswrapper[5102]: I0123 07:24:14.907289 5102 scope.go:117] "RemoveContainer" containerID="60739a227d7d472e20c6d49976fdefef7c5c808195298c9c502a74f3226d9f61" Jan 23 07:24:14 crc kubenswrapper[5102]: I0123 07:24:14.944126 5102 scope.go:117] "RemoveContainer" containerID="174c02431f1c59ad7f9abe23084157209612d312995dcf7b1c90091bc0f8b4d9" Jan 23 07:24:15 crc kubenswrapper[5102]: I0123 07:24:15.007732 5102 scope.go:117] "RemoveContainer" containerID="fb0523159313e01a611a22443a2546781a5f75a38f16084d9a077df4b793c4bb" Jan 23 07:24:15 crc kubenswrapper[5102]: I0123 07:24:15.078501 5102 scope.go:117] "RemoveContainer" containerID="83f6688858929172569adc29be5050e472a2b130da335ee56b80e52deff26332" Jan 23 07:24:16 crc kubenswrapper[5102]: I0123 07:24:16.598531 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:24:16 crc kubenswrapper[5102]: E0123 07:24:16.599315 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:24:30 crc kubenswrapper[5102]: I0123 07:24:30.598141 5102 scope.go:117] "RemoveContainer" 
containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:24:30 crc kubenswrapper[5102]: E0123 07:24:30.599583 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:24:42 crc kubenswrapper[5102]: I0123 07:24:42.598572 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:24:42 crc kubenswrapper[5102]: E0123 07:24:42.599831 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:24:53 crc kubenswrapper[5102]: I0123 07:24:53.598972 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:24:53 crc kubenswrapper[5102]: E0123 07:24:53.600291 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:25:06 crc kubenswrapper[5102]: I0123 07:25:06.600079 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:25:06 crc kubenswrapper[5102]: E0123 07:25:06.601314 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:25:15 crc kubenswrapper[5102]: I0123 07:25:15.240809 5102 scope.go:117] "RemoveContainer" containerID="4d7b6d603a00e934b80420f8abd17cabe70620c234e7bd375a48fc68ea87c3ac" Jan 23 07:25:15 crc kubenswrapper[5102]: I0123 07:25:15.283186 5102 scope.go:117] "RemoveContainer" containerID="37d6e75df8ea73ff65995440840d50d5ede836b60edd1fd4be81f18b7fb96153" Jan 23 07:25:19 crc kubenswrapper[5102]: I0123 07:25:19.606789 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:25:19 crc kubenswrapper[5102]: E0123 07:25:19.608062 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:25:30 crc kubenswrapper[5102]: I0123 07:25:30.598609 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:25:30 crc kubenswrapper[5102]: E0123 07:25:30.599447 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:25:42 crc kubenswrapper[5102]: I0123 07:25:42.598001 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:25:42 crc kubenswrapper[5102]: E0123 07:25:42.598819 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:25:55 crc kubenswrapper[5102]: I0123 07:25:55.599696 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:25:55 crc kubenswrapper[5102]: E0123 07:25:55.600680 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:26:06 crc kubenswrapper[5102]: I0123 07:26:06.598993 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:26:06 crc kubenswrapper[5102]: E0123 07:26:06.600110 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:26:17 crc kubenswrapper[5102]: I0123 07:26:17.601532 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:26:17 crc kubenswrapper[5102]: E0123 07:26:17.602846 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:26:30 crc kubenswrapper[5102]: I0123 07:26:30.598868 5102 
scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:26:30 crc kubenswrapper[5102]: E0123 07:26:30.599784 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:26:44 crc kubenswrapper[5102]: I0123 07:26:44.599177 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:26:44 crc kubenswrapper[5102]: E0123 07:26:44.600117 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:26:55 crc kubenswrapper[5102]: I0123 07:26:55.599525 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:26:55 crc kubenswrapper[5102]: E0123 07:26:55.600319 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:27:09 crc kubenswrapper[5102]: I0123 07:27:09.606371 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:27:09 crc kubenswrapper[5102]: E0123 07:27:09.607898 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:27:24 crc kubenswrapper[5102]: I0123 07:27:24.598384 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:27:25 crc kubenswrapper[5102]: I0123 07:27:25.423644 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6"} Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.647016 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"] Jan 23 07:28:37 crc kubenswrapper[5102]: E0123 07:28:37.648382 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="extract-utilities" Jan 23 07:28:37 crc kubenswrapper[5102]: 
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.647016 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:28:37 crc kubenswrapper[5102]: E0123 07:28:37.648382 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="extract-utilities"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.648415 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="extract-utilities"
Jan 23 07:28:37 crc kubenswrapper[5102]: E0123 07:28:37.648438 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="registry-server"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.648457 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="registry-server"
Jan 23 07:28:37 crc kubenswrapper[5102]: E0123 07:28:37.648503 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="extract-content"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.648522 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="extract-content"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.648880 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="14db9f8a-a648-4b9e-a845-639556a9f476" containerName="registry-server"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.652101 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.708382 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.804908 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dwngc\" (UniqueName: \"kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.804977 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.805033 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.906171 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dwngc\" (UniqueName: \"kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.906218 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.906251 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.906725 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.907218 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:37 crc kubenswrapper[5102]: I0123 07:28:37.927791 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dwngc\" (UniqueName: \"kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc\") pod \"redhat-operators-bgts9\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") " pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:38 crc kubenswrapper[5102]: I0123 07:28:38.032526 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:38 crc kubenswrapper[5102]: I0123 07:28:38.299086 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:28:39 crc kubenswrapper[5102]: I0123 07:28:39.123137 5102 generic.go:334] "Generic (PLEG): container finished" podID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerID="73575a0c5a2fc048fdfe28d1b66d34d5600da131498d52f9751987d8a31ecae1" exitCode=0
Jan 23 07:28:39 crc kubenswrapper[5102]: I0123 07:28:39.123463 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerDied","Data":"73575a0c5a2fc048fdfe28d1b66d34d5600da131498d52f9751987d8a31ecae1"}
Jan 23 07:28:39 crc kubenswrapper[5102]: I0123 07:28:39.123496 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerStarted","Data":"823e350c25027cb6425a8dd6f15529b033ed5616213e05e5d814bff4a8e8eae3"}
Jan 23 07:28:39 crc kubenswrapper[5102]: I0123 07:28:39.124900 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 07:28:42 crc kubenswrapper[5102]: I0123 07:28:42.153405 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerStarted","Data":"19544b9f722471391563c33e3f4c56d20c6d94dd01851f36cf7f78086287c868"}
Jan 23 07:28:43 crc kubenswrapper[5102]: I0123 07:28:43.165386 5102 generic.go:334] "Generic (PLEG): container finished" podID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerID="19544b9f722471391563c33e3f4c56d20c6d94dd01851f36cf7f78086287c868" exitCode=0
Jan 23 07:28:43 crc kubenswrapper[5102]: I0123 07:28:43.165464 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerDied","Data":"19544b9f722471391563c33e3f4c56d20c6d94dd01851f36cf7f78086287c868"}
Jan 23 07:28:45 crc kubenswrapper[5102]: I0123 07:28:45.181998 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerStarted","Data":"3c01a90e85380474b7726586c309847d4d44748e57670633ff0bdc150b9fabae"}
Jan 23 07:28:45 crc kubenswrapper[5102]: I0123 07:28:45.213060 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-bgts9" podStartSLOduration=3.069009755 podStartE2EDuration="8.213039505s" podCreationTimestamp="2026-01-23 07:28:37 +0000 UTC" firstStartedPulling="2026-01-23 07:28:39.124669746 +0000 UTC m=+2069.945018721" lastFinishedPulling="2026-01-23 07:28:44.268699496 +0000 UTC m=+2075.089048471" observedRunningTime="2026-01-23 07:28:45.205397664 +0000 UTC m=+2076.025746649" watchObservedRunningTime="2026-01-23 07:28:45.213039505 +0000 UTC m=+2076.033388500"
Jan 23 07:28:48 crc kubenswrapper[5102]: I0123 07:28:48.033429 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:48 crc kubenswrapper[5102]: I0123 07:28:48.033854 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:49 crc kubenswrapper[5102]: I0123 07:28:49.096085 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-bgts9" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="registry-server" probeResult="failure" output=<
Jan 23 07:28:49 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s
Jan 23 07:28:49 crc kubenswrapper[5102]: >
Jan 23 07:28:58 crc kubenswrapper[5102]: I0123 07:28:58.088720 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:58 crc kubenswrapper[5102]: I0123 07:28:58.156310 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:28:58 crc kubenswrapper[5102]: I0123 07:28:58.336646 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:28:59 crc kubenswrapper[5102]: I0123 07:28:59.285287 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-bgts9" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="registry-server" containerID="cri-o://3c01a90e85380474b7726586c309847d4d44748e57670633ff0bdc150b9fabae" gracePeriod=2
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.333142 5102 generic.go:334] "Generic (PLEG): container finished" podID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerID="3c01a90e85380474b7726586c309847d4d44748e57670633ff0bdc150b9fabae" exitCode=0
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.333256 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerDied","Data":"3c01a90e85380474b7726586c309847d4d44748e57670633ff0bdc150b9fabae"}
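
The probe output above ("timeout: failed to connect service \":50051\" within 1s") is the registry-server's startup probe timing out before the catalog has finished loading; ten seconds later the startup and readiness probes flip to started/ready. A rough Go stand-in for the connect-with-deadline part of that check, assuming port 50051 from the logged output (the real probe is presumably a gRPC health check, not a bare TCP dial):

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	// 1s deadline mirrors the "within 1s" in the logged probe failure.
	conn, err := net.DialTimeout("tcp", "127.0.0.1:50051", 1*time.Second)
	if err != nil {
		fmt.Println("probe failed:", err) // same shape as the startup probe failure above
		return
	}
	conn.Close()
	fmt.Println("probe succeeded")
}
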
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.641387 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.753290 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities\") pod \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") "
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.753398 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dwngc\" (UniqueName: \"kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc\") pod \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") "
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.753474 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content\") pod \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\" (UID: \"53ed5c26-09ba-4164-aa50-03ec20dde5f2\") "
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.755303 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities" (OuterVolumeSpecName: "utilities") pod "53ed5c26-09ba-4164-aa50-03ec20dde5f2" (UID: "53ed5c26-09ba-4164-aa50-03ec20dde5f2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.759790 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc" (OuterVolumeSpecName: "kube-api-access-dwngc") pod "53ed5c26-09ba-4164-aa50-03ec20dde5f2" (UID: "53ed5c26-09ba-4164-aa50-03ec20dde5f2"). InnerVolumeSpecName "kube-api-access-dwngc". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.855709 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.855756 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dwngc\" (UniqueName: \"kubernetes.io/projected/53ed5c26-09ba-4164-aa50-03ec20dde5f2-kube-api-access-dwngc\") on node \"crc\" DevicePath \"\""
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.892075 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "53ed5c26-09ba-4164-aa50-03ec20dde5f2" (UID: "53ed5c26-09ba-4164-aa50-03ec20dde5f2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:29:03 crc kubenswrapper[5102]: I0123 07:29:03.957620 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/53ed5c26-09ba-4164-aa50-03ec20dde5f2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.344497 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-bgts9" event={"ID":"53ed5c26-09ba-4164-aa50-03ec20dde5f2","Type":"ContainerDied","Data":"823e350c25027cb6425a8dd6f15529b033ed5616213e05e5d814bff4a8e8eae3"}
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.344647 5102 scope.go:117] "RemoveContainer" containerID="3c01a90e85380474b7726586c309847d4d44748e57670633ff0bdc150b9fabae"
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.344742 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-bgts9"
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.380329 5102 scope.go:117] "RemoveContainer" containerID="19544b9f722471391563c33e3f4c56d20c6d94dd01851f36cf7f78086287c868"
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.406671 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.412626 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-bgts9"]
Jan 23 07:29:04 crc kubenswrapper[5102]: I0123 07:29:04.423202 5102 scope.go:117] "RemoveContainer" containerID="73575a0c5a2fc048fdfe28d1b66d34d5600da131498d52f9751987d8a31ecae1"
Jan 23 07:29:05 crc kubenswrapper[5102]: I0123 07:29:05.610056 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" path="/var/lib/kubelet/pods/53ed5c26-09ba-4164-aa50-03ec20dde5f2/volumes"
Jan 23 07:29:46 crc kubenswrapper[5102]: I0123 07:29:46.767918 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:29:46 crc kubenswrapper[5102]: I0123 07:29:46.768335 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.182424 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"]
Jan 23 07:30:00 crc kubenswrapper[5102]: E0123 07:30:00.183294 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="extract-content"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.183311 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="extract-content"
Jan 23 07:30:00 crc kubenswrapper[5102]: E0123 07:30:00.183341 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="extract-utilities"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.183350 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="extract-utilities"
Jan 23 07:30:00 crc kubenswrapper[5102]: E0123 07:30:00.183370 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="registry-server"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.183378 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="registry-server"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.183555 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="53ed5c26-09ba-4164-aa50-03ec20dde5f2" containerName="registry-server"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.184110 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.187695 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.187924 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.193639 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"]
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.248786 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.248864 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-j8tqk\" (UniqueName: \"kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.248917 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.350120 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.350190 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-j8tqk\" (UniqueName: \"kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.350234 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.354198 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.359855 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.369064 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-j8tqk\" (UniqueName: \"kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk\") pod \"collect-profiles-29485890-jcxgm\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:00 crc kubenswrapper[5102]: I0123 07:30:00.546053 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:01 crc kubenswrapper[5102]: I0123 07:30:01.053900 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"]
Jan 23 07:30:01 crc kubenswrapper[5102]: I0123 07:30:01.851472 5102 generic.go:334] "Generic (PLEG): container finished" podID="d1ef1e0b-601f-4faa-bc86-2aaacd0cef11" containerID="f8efcffc78b36bb69db74f192696ab626e8b58c9a847e1d9ad9e4118cab4a7fb" exitCode=0
Jan 23 07:30:01 crc kubenswrapper[5102]: I0123 07:30:01.851625 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm" event={"ID":"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11","Type":"ContainerDied","Data":"f8efcffc78b36bb69db74f192696ab626e8b58c9a847e1d9ad9e4118cab4a7fb"}
Jan 23 07:30:01 crc kubenswrapper[5102]: I0123 07:30:01.851881 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm" event={"ID":"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11","Type":"ContainerStarted","Data":"bcf5d7b8265cd465c20640c8594998a75097548602d42cfd5af94435ec8385ee"}
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.291855 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.397767 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-j8tqk\" (UniqueName: \"kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk\") pod \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") "
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.397985 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume\") pod \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") "
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.398130 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume\") pod \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\" (UID: \"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11\") "
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.399593 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume" (OuterVolumeSpecName: "config-volume") pod "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11" (UID: "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.405322 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11" (UID: "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.415772 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk" (OuterVolumeSpecName: "kube-api-access-j8tqk") pod "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11" (UID: "d1ef1e0b-601f-4faa-bc86-2aaacd0cef11"). InnerVolumeSpecName "kube-api-access-j8tqk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.500101 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.500130 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.500141 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-j8tqk\" (UniqueName: \"kubernetes.io/projected/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11-kube-api-access-j8tqk\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.875847 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm" event={"ID":"d1ef1e0b-601f-4faa-bc86-2aaacd0cef11","Type":"ContainerDied","Data":"bcf5d7b8265cd465c20640c8594998a75097548602d42cfd5af94435ec8385ee"}
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.875906 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="bcf5d7b8265cd465c20640c8594998a75097548602d42cfd5af94435ec8385ee"
Jan 23 07:30:03 crc kubenswrapper[5102]: I0123 07:30:03.875973 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"
Jan 23 07:30:04 crc kubenswrapper[5102]: I0123 07:30:04.390569 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm"]
Jan 23 07:30:04 crc kubenswrapper[5102]: I0123 07:30:04.400296 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485845-sm4fm"]
Need to start a new one" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.206232 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tns6h"] Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.330636 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.330711 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.330877 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p68dq\" (UniqueName: \"kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.432206 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.433384 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.433106 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.433520 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p68dq\" (UniqueName: \"kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.433882 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.477691 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p68dq\" (UniqueName: \"kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq\") pod \"certified-operators-tns6h\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.500776 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.607748 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ac5092a2-a268-42c6-98be-b902ae96f92f" path="/var/lib/kubelet/pods/ac5092a2-a268-42c6-98be-b902ae96f92f/volumes" Jan 23 07:30:05 crc kubenswrapper[5102]: I0123 07:30:05.967574 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-tns6h"] Jan 23 07:30:06 crc kubenswrapper[5102]: I0123 07:30:06.902379 5102 generic.go:334] "Generic (PLEG): container finished" podID="1849e8d8-921b-40ea-86b5-120459acee40" containerID="9b581283337c8fa97524052922c52e6f4e5a8fdf2dbac461a564ee51769289fa" exitCode=0 Jan 23 07:30:06 crc kubenswrapper[5102]: I0123 07:30:06.902593 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerDied","Data":"9b581283337c8fa97524052922c52e6f4e5a8fdf2dbac461a564ee51769289fa"} Jan 23 07:30:06 crc kubenswrapper[5102]: I0123 07:30:06.902777 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerStarted","Data":"8c96a9c01ad378a9eec9b700ab9918f8c34927b750aeaefc06cf477fd57311fb"} Jan 23 07:30:07 crc kubenswrapper[5102]: I0123 07:30:07.916630 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerStarted","Data":"58bba73a3cfa23edd1511bc1196e90a4d556929010d9b3b1617ab60d751db3d8"} Jan 23 07:30:08 crc kubenswrapper[5102]: I0123 07:30:08.928821 5102 generic.go:334] "Generic (PLEG): container finished" podID="1849e8d8-921b-40ea-86b5-120459acee40" containerID="58bba73a3cfa23edd1511bc1196e90a4d556929010d9b3b1617ab60d751db3d8" exitCode=0 Jan 23 07:30:08 crc kubenswrapper[5102]: I0123 07:30:08.928909 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerDied","Data":"58bba73a3cfa23edd1511bc1196e90a4d556929010d9b3b1617ab60d751db3d8"} Jan 23 07:30:09 crc kubenswrapper[5102]: I0123 07:30:09.942447 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerStarted","Data":"f03349c4ae8b77f8f8add552d00e3387dc679e1f37be26d418503d40f221e830"} Jan 23 07:30:09 crc kubenswrapper[5102]: I0123 07:30:09.968219 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-tns6h" podStartSLOduration=2.489185147 podStartE2EDuration="4.96820295s" podCreationTimestamp="2026-01-23 07:30:05 +0000 UTC" firstStartedPulling="2026-01-23 07:30:06.90405174 +0000 UTC m=+2157.724400725" lastFinishedPulling="2026-01-23 07:30:09.383069543 +0000 UTC m=+2160.203418528" observedRunningTime="2026-01-23 
07:30:09.96625148 +0000 UTC m=+2160.786600465" watchObservedRunningTime="2026-01-23 07:30:09.96820295 +0000 UTC m=+2160.788551935" Jan 23 07:30:15 crc kubenswrapper[5102]: I0123 07:30:15.421843 5102 scope.go:117] "RemoveContainer" containerID="a0bd276003e94b7d0d858491a9c0d57240f58f4dd4a30e9b41902cbb17e87fca" Jan 23 07:30:15 crc kubenswrapper[5102]: I0123 07:30:15.501140 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:15 crc kubenswrapper[5102]: I0123 07:30:15.501686 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:15 crc kubenswrapper[5102]: I0123 07:30:15.561100 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:16 crc kubenswrapper[5102]: I0123 07:30:16.082749 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:16 crc kubenswrapper[5102]: I0123 07:30:16.145982 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tns6h"] Jan 23 07:30:16 crc kubenswrapper[5102]: I0123 07:30:16.768298 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:30:16 crc kubenswrapper[5102]: I0123 07:30:16.769746 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:30:18 crc kubenswrapper[5102]: I0123 07:30:18.016127 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-tns6h" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="registry-server" containerID="cri-o://f03349c4ae8b77f8f8add552d00e3387dc679e1f37be26d418503d40f221e830" gracePeriod=2 Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.026517 5102 generic.go:334] "Generic (PLEG): container finished" podID="1849e8d8-921b-40ea-86b5-120459acee40" containerID="f03349c4ae8b77f8f8add552d00e3387dc679e1f37be26d418503d40f221e830" exitCode=0 Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.026682 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerDied","Data":"f03349c4ae8b77f8f8add552d00e3387dc679e1f37be26d418503d40f221e830"} Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.178891 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.359662 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content\") pod \"1849e8d8-921b-40ea-86b5-120459acee40\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.359765 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p68dq\" (UniqueName: \"kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq\") pod \"1849e8d8-921b-40ea-86b5-120459acee40\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.359808 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities\") pod \"1849e8d8-921b-40ea-86b5-120459acee40\" (UID: \"1849e8d8-921b-40ea-86b5-120459acee40\") " Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.361895 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities" (OuterVolumeSpecName: "utilities") pod "1849e8d8-921b-40ea-86b5-120459acee40" (UID: "1849e8d8-921b-40ea-86b5-120459acee40"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.370496 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq" (OuterVolumeSpecName: "kube-api-access-p68dq") pod "1849e8d8-921b-40ea-86b5-120459acee40" (UID: "1849e8d8-921b-40ea-86b5-120459acee40"). InnerVolumeSpecName "kube-api-access-p68dq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.447059 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1849e8d8-921b-40ea-86b5-120459acee40" (UID: "1849e8d8-921b-40ea-86b5-120459acee40"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.462124 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.462179 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p68dq\" (UniqueName: \"kubernetes.io/projected/1849e8d8-921b-40ea-86b5-120459acee40-kube-api-access-p68dq\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:19 crc kubenswrapper[5102]: I0123 07:30:19.462203 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1849e8d8-921b-40ea-86b5-120459acee40-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.042500 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-tns6h" event={"ID":"1849e8d8-921b-40ea-86b5-120459acee40","Type":"ContainerDied","Data":"8c96a9c01ad378a9eec9b700ab9918f8c34927b750aeaefc06cf477fd57311fb"} Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.042630 5102 scope.go:117] "RemoveContainer" containerID="f03349c4ae8b77f8f8add552d00e3387dc679e1f37be26d418503d40f221e830" Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.042634 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-tns6h" Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.079215 5102 scope.go:117] "RemoveContainer" containerID="58bba73a3cfa23edd1511bc1196e90a4d556929010d9b3b1617ab60d751db3d8" Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.082001 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-tns6h"] Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.093602 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-tns6h"] Jan 23 07:30:20 crc kubenswrapper[5102]: I0123 07:30:20.121602 5102 scope.go:117] "RemoveContainer" containerID="9b581283337c8fa97524052922c52e6f4e5a8fdf2dbac461a564ee51769289fa" Jan 23 07:30:21 crc kubenswrapper[5102]: I0123 07:30:21.615627 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1849e8d8-921b-40ea-86b5-120459acee40" path="/var/lib/kubelet/pods/1849e8d8-921b-40ea-86b5-120459acee40/volumes" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.205494 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-s2fn6"] Jan 23 07:30:35 crc kubenswrapper[5102]: E0123 07:30:35.206666 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="extract-utilities" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.206689 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="extract-utilities" Jan 23 07:30:35 crc kubenswrapper[5102]: E0123 07:30:35.206712 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="extract-content" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.206724 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="extract-content" Jan 23 07:30:35 crc kubenswrapper[5102]: E0123 07:30:35.206748 5102 
cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="registry-server" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.206762 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="registry-server" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.207022 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1849e8d8-921b-40ea-86b5-120459acee40" containerName="registry-server" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.208872 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.234708 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s2fn6"] Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.336829 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.336905 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.336999 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fxn2t\" (UniqueName: \"kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.438348 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fxn2t\" (UniqueName: \"kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.438446 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.438488 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.439061 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.439139 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.471524 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fxn2t\" (UniqueName: \"kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t\") pod \"community-operators-s2fn6\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:35 crc kubenswrapper[5102]: I0123 07:30:35.548910 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:36 crc kubenswrapper[5102]: I0123 07:30:36.066199 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-s2fn6"] Jan 23 07:30:36 crc kubenswrapper[5102]: W0123 07:30:36.078755 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podcef9226e_90d8_4b40_9297_7c75ecf130a9.slice/crio-c4034ed2f440c4b32c1de46c71abbd6b676d5953ff029940e252147eabbbfa59 WatchSource:0}: Error finding container c4034ed2f440c4b32c1de46c71abbd6b676d5953ff029940e252147eabbbfa59: Status 404 returned error can't find the container with id c4034ed2f440c4b32c1de46c71abbd6b676d5953ff029940e252147eabbbfa59 Jan 23 07:30:36 crc kubenswrapper[5102]: I0123 07:30:36.227248 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerStarted","Data":"c4034ed2f440c4b32c1de46c71abbd6b676d5953ff029940e252147eabbbfa59"} Jan 23 07:30:37 crc kubenswrapper[5102]: I0123 07:30:37.242464 5102 generic.go:334] "Generic (PLEG): container finished" podID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerID="4680dc2853c15c20d992bc82a5db15df36dca7c3766afd1a73c7f94ab913ecca" exitCode=0 Jan 23 07:30:37 crc kubenswrapper[5102]: I0123 07:30:37.242724 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerDied","Data":"4680dc2853c15c20d992bc82a5db15df36dca7c3766afd1a73c7f94ab913ecca"} Jan 23 07:30:38 crc kubenswrapper[5102]: I0123 07:30:38.254974 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerStarted","Data":"10ea317152a051293671d5abbecd541386fa6825f50a1399adb9e8695311130e"} Jan 23 07:30:39 crc kubenswrapper[5102]: I0123 07:30:39.279365 5102 generic.go:334] "Generic (PLEG): container finished" podID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerID="10ea317152a051293671d5abbecd541386fa6825f50a1399adb9e8695311130e" exitCode=0 Jan 23 07:30:39 crc kubenswrapper[5102]: I0123 07:30:39.279488 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerDied","Data":"10ea317152a051293671d5abbecd541386fa6825f50a1399adb9e8695311130e"} Jan 23 07:30:39 crc kubenswrapper[5102]: I0123 07:30:39.989869 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"] Jan 23 07:30:39 crc kubenswrapper[5102]: I0123 07:30:39.997861 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.005231 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"] Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.153385 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.153445 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqlwk\" (UniqueName: \"kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.153618 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.255396 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.255454 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqlwk\" (UniqueName: \"kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.255517 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.255982 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 
23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.256007 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.289591 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqlwk\" (UniqueName: \"kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk\") pod \"redhat-marketplace-5grfs\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") " pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.293827 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerStarted","Data":"c3b567d33e3ff50e150607fa40717a2c00764ba1b37e85c0cc52aac24eeecefc"} Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.315955 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.579238 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-s2fn6" podStartSLOduration=3.101851529 podStartE2EDuration="5.579218501s" podCreationTimestamp="2026-01-23 07:30:35 +0000 UTC" firstStartedPulling="2026-01-23 07:30:37.245182546 +0000 UTC m=+2188.065531551" lastFinishedPulling="2026-01-23 07:30:39.722549548 +0000 UTC m=+2190.542898523" observedRunningTime="2026-01-23 07:30:40.32400325 +0000 UTC m=+2191.144352235" watchObservedRunningTime="2026-01-23 07:30:40.579218501 +0000 UTC m=+2191.399567476" Jan 23 07:30:40 crc kubenswrapper[5102]: I0123 07:30:40.579652 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"] Jan 23 07:30:41 crc kubenswrapper[5102]: I0123 07:30:41.305454 5102 generic.go:334] "Generic (PLEG): container finished" podID="da557099-0364-4c8f-9d82-18cb4c648aed" containerID="fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a" exitCode=0 Jan 23 07:30:41 crc kubenswrapper[5102]: I0123 07:30:41.305571 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerDied","Data":"fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a"} Jan 23 07:30:41 crc kubenswrapper[5102]: I0123 07:30:41.307344 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerStarted","Data":"0a1b1ce2e79d6c73f60b99a04e9ae48e4e55688b0eb171d3bb065fb902aac509"} Jan 23 07:30:43 crc kubenswrapper[5102]: I0123 07:30:43.331269 5102 generic.go:334] "Generic (PLEG): container finished" podID="da557099-0364-4c8f-9d82-18cb4c648aed" containerID="bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313" exitCode=0 Jan 23 07:30:43 crc kubenswrapper[5102]: I0123 07:30:43.331414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" 
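[Editor's note: the "Observed pod startup duration" entry above carries enough timestamps to rederive both figures: podStartE2EDuration is watchObservedRunningTime minus podCreationTimestamp, and podStartSLOduration subtracts the image-pull window (lastFinishedPulling minus firstStartedPulling). A Go sketch of the arithmetic with the logged values; the wall-clock math lands about 30ns off the logged 3.101851529, which suggests kubelet subtracts the monotonic (m=+...) readings instead:]

package main

import (
	"fmt"
	"time"
)

// Timestamps copied from the "Observed pod startup duration" entry
// for community-operators-s2fn6 above.
const layout = "2006-01-02 15:04:05.999999999 -0700 MST"

func mustParse(s string) time.Time {
	t, err := time.Parse(layout, s)
	if err != nil {
		panic(err)
	}
	return t
}

func main() {
	created := mustParse("2026-01-23 07:30:35 +0000 UTC")
	firstPull := mustParse("2026-01-23 07:30:37.245182546 +0000 UTC")
	lastPull := mustParse("2026-01-23 07:30:39.722549548 +0000 UTC")
	watchRunning := mustParse("2026-01-23 07:30:40.579218501 +0000 UTC")

	e2e := watchRunning.Sub(created)   // podStartE2EDuration: 5.579218501s
	pulling := lastPull.Sub(firstPull) // image pull window: ~2.477367002s
	slo := e2e - pulling               // podStartSLOduration: ~3.1018515s

	fmt.Println(e2e, pulling, slo)
}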
event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerDied","Data":"bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313"} Jan 23 07:30:45 crc kubenswrapper[5102]: I0123 07:30:45.348414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerStarted","Data":"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf"} Jan 23 07:30:45 crc kubenswrapper[5102]: I0123 07:30:45.549616 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:45 crc kubenswrapper[5102]: I0123 07:30:45.549703 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:45 crc kubenswrapper[5102]: I0123 07:30:45.611954 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:45 crc kubenswrapper[5102]: I0123 07:30:45.636079 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-5grfs" podStartSLOduration=3.601104604 podStartE2EDuration="6.636053619s" podCreationTimestamp="2026-01-23 07:30:39 +0000 UTC" firstStartedPulling="2026-01-23 07:30:41.308177525 +0000 UTC m=+2192.128526530" lastFinishedPulling="2026-01-23 07:30:44.34312657 +0000 UTC m=+2195.163475545" observedRunningTime="2026-01-23 07:30:45.372487954 +0000 UTC m=+2196.192836959" watchObservedRunningTime="2026-01-23 07:30:45.636053619 +0000 UTC m=+2196.456402604" Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.435372 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.751512 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s2fn6"] Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.768469 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.768521 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.768632 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.769280 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:30:46 crc kubenswrapper[5102]: I0123 07:30:46.769349 5102 kuberuntime_container.go:808] "Killing container with a 
grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6" gracePeriod=600 Jan 23 07:30:47 crc kubenswrapper[5102]: I0123 07:30:47.389811 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6" exitCode=0 Jan 23 07:30:47 crc kubenswrapper[5102]: I0123 07:30:47.390233 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6"} Jan 23 07:30:47 crc kubenswrapper[5102]: I0123 07:30:47.390301 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337"} Jan 23 07:30:47 crc kubenswrapper[5102]: I0123 07:30:47.390332 5102 scope.go:117] "RemoveContainer" containerID="81665bb34afe3f50f34beb9434b3620e57aa5bd76247cdff4fe7753fb10ac458" Jan 23 07:30:48 crc kubenswrapper[5102]: I0123 07:30:48.409047 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-s2fn6" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="registry-server" containerID="cri-o://c3b567d33e3ff50e150607fa40717a2c00764ba1b37e85c0cc52aac24eeecefc" gracePeriod=2 Jan 23 07:30:49 crc kubenswrapper[5102]: I0123 07:30:49.423137 5102 generic.go:334] "Generic (PLEG): container finished" podID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerID="c3b567d33e3ff50e150607fa40717a2c00764ba1b37e85c0cc52aac24eeecefc" exitCode=0 Jan 23 07:30:49 crc kubenswrapper[5102]: I0123 07:30:49.423248 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerDied","Data":"c3b567d33e3ff50e150607fa40717a2c00764ba1b37e85c0cc52aac24eeecefc"} Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.092636 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-s2fn6" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.135188 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fxn2t\" (UniqueName: \"kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t\") pod \"cef9226e-90d8-4b40-9297-7c75ecf130a9\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.135298 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content\") pod \"cef9226e-90d8-4b40-9297-7c75ecf130a9\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.135364 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities\") pod \"cef9226e-90d8-4b40-9297-7c75ecf130a9\" (UID: \"cef9226e-90d8-4b40-9297-7c75ecf130a9\") " Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.139072 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities" (OuterVolumeSpecName: "utilities") pod "cef9226e-90d8-4b40-9297-7c75ecf130a9" (UID: "cef9226e-90d8-4b40-9297-7c75ecf130a9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.147177 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t" (OuterVolumeSpecName: "kube-api-access-fxn2t") pod "cef9226e-90d8-4b40-9297-7c75ecf130a9" (UID: "cef9226e-90d8-4b40-9297-7c75ecf130a9"). InnerVolumeSpecName "kube-api-access-fxn2t". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.223963 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cef9226e-90d8-4b40-9297-7c75ecf130a9" (UID: "cef9226e-90d8-4b40-9297-7c75ecf130a9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.236655 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fxn2t\" (UniqueName: \"kubernetes.io/projected/cef9226e-90d8-4b40-9297-7c75ecf130a9-kube-api-access-fxn2t\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.236686 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.236698 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cef9226e-90d8-4b40-9297-7c75ecf130a9-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.316097 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.316191 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.395626 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.453856 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-s2fn6" event={"ID":"cef9226e-90d8-4b40-9297-7c75ecf130a9","Type":"ContainerDied","Data":"c4034ed2f440c4b32c1de46c71abbd6b676d5953ff029940e252147eabbbfa59"} Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.453919 5102 scope.go:117] "RemoveContainer" containerID="c3b567d33e3ff50e150607fa40717a2c00764ba1b37e85c0cc52aac24eeecefc" Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.453930 5102 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.453930 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-s2fn6"
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.477608 5102 scope.go:117] "RemoveContainer" containerID="10ea317152a051293671d5abbecd541386fa6825f50a1399adb9e8695311130e"
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.518167 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-s2fn6"]
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.520304 5102 scope.go:117] "RemoveContainer" containerID="4680dc2853c15c20d992bc82a5db15df36dca7c3766afd1a73c7f94ab913ecca"
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.524020 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-s2fn6"]
Jan 23 07:30:50 crc kubenswrapper[5102]: I0123 07:30:50.534954 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-5grfs"
Jan 23 07:30:51 crc kubenswrapper[5102]: I0123 07:30:51.617448 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" path="/var/lib/kubelet/pods/cef9226e-90d8-4b40-9297-7c75ecf130a9/volumes"
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.158049 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"]
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.473078 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-5grfs" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="registry-server" containerID="cri-o://2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf" gracePeriod=2
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.977378 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grfs"
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.981810 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities\") pod \"da557099-0364-4c8f-9d82-18cb4c648aed\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") "
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.981945 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content\") pod \"da557099-0364-4c8f-9d82-18cb4c648aed\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") "
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.981967 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pqlwk\" (UniqueName: \"kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk\") pod \"da557099-0364-4c8f-9d82-18cb4c648aed\" (UID: \"da557099-0364-4c8f-9d82-18cb4c648aed\") "
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.983725 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities" (OuterVolumeSpecName: "utilities") pod "da557099-0364-4c8f-9d82-18cb4c648aed" (UID: "da557099-0364-4c8f-9d82-18cb4c648aed"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:30:52 crc kubenswrapper[5102]: I0123 07:30:52.991741 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk" (OuterVolumeSpecName: "kube-api-access-pqlwk") pod "da557099-0364-4c8f-9d82-18cb4c648aed" (UID: "da557099-0364-4c8f-9d82-18cb4c648aed"). InnerVolumeSpecName "kube-api-access-pqlwk". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.014985 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da557099-0364-4c8f-9d82-18cb4c648aed" (UID: "da557099-0364-4c8f-9d82-18cb4c648aed"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.083349 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.083385 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da557099-0364-4c8f-9d82-18cb4c648aed-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.083400 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pqlwk\" (UniqueName: \"kubernetes.io/projected/da557099-0364-4c8f-9d82-18cb4c648aed-kube-api-access-pqlwk\") on node \"crc\" DevicePath \"\""
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.489870 5102 generic.go:334] "Generic (PLEG): container finished" podID="da557099-0364-4c8f-9d82-18cb4c648aed" containerID="2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf" exitCode=0
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.489946 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerDied","Data":"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf"}
Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.490001 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-5grfs" event={"ID":"da557099-0364-4c8f-9d82-18cb4c648aed","Type":"ContainerDied","Data":"0a1b1ce2e79d6c73f60b99a04e9ae48e4e55688b0eb171d3bb065fb902aac509"}
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-5grfs" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.490094 5102 scope.go:117] "RemoveContainer" containerID="2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.525630 5102 scope.go:117] "RemoveContainer" containerID="bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.544579 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"] Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.551163 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-5grfs"] Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.563082 5102 scope.go:117] "RemoveContainer" containerID="fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.600289 5102 scope.go:117] "RemoveContainer" containerID="2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf" Jan 23 07:30:53 crc kubenswrapper[5102]: E0123 07:30:53.600811 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf\": container with ID starting with 2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf not found: ID does not exist" containerID="2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.600852 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf"} err="failed to get container status \"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf\": rpc error: code = NotFound desc = could not find container \"2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf\": container with ID starting with 2ad289e51722dd6e8a6c518b2027ac8e0caeb7e1656ec02c5fffa9fa2d931bbf not found: ID does not exist" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.600877 5102 scope.go:117] "RemoveContainer" containerID="bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313" Jan 23 07:30:53 crc kubenswrapper[5102]: E0123 07:30:53.601574 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313\": container with ID starting with bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313 not found: ID does not exist" containerID="bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.601601 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313"} err="failed to get container status \"bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313\": rpc error: code = NotFound desc = could not find container \"bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313\": container with ID starting with bb8da2942e4e300023f87ee4d5897bd133def3d8ca32e27362cab96894c22313 not found: ID does not exist" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.601617 5102 scope.go:117] "RemoveContainer" 
containerID="fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a" Jan 23 07:30:53 crc kubenswrapper[5102]: E0123 07:30:53.601876 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a\": container with ID starting with fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a not found: ID does not exist" containerID="fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.601904 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a"} err="failed to get container status \"fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a\": rpc error: code = NotFound desc = could not find container \"fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a\": container with ID starting with fe21266ab9b9ed7d9f86d137ead322c6fb0e3b9468f92174e35c9476e2bf879a not found: ID does not exist" Jan 23 07:30:53 crc kubenswrapper[5102]: I0123 07:30:53.610657 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" path="/var/lib/kubelet/pods/da557099-0364-4c8f-9d82-18cb4c648aed/volumes" Jan 23 07:33:16 crc kubenswrapper[5102]: I0123 07:33:16.768673 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:33:16 crc kubenswrapper[5102]: I0123 07:33:16.769184 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:33:46 crc kubenswrapper[5102]: I0123 07:33:46.768498 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:33:46 crc kubenswrapper[5102]: I0123 07:33:46.769140 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:34:16 crc kubenswrapper[5102]: I0123 07:34:16.768066 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:16.768760 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:16.768885 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:16.769615 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:16.769720 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" gracePeriod=600 Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:17.420224 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" exitCode=0 Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:17.420311 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337"} Jan 23 07:34:17 crc kubenswrapper[5102]: I0123 07:34:17.420396 5102 scope.go:117] "RemoveContainer" containerID="a0181a5b950853e8921655cd9631ded00002efa35befb260f4c2da6eea78eba6" Jan 23 07:34:17 crc kubenswrapper[5102]: E0123 07:34:17.455049 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:34:18 crc kubenswrapper[5102]: I0123 07:34:18.434973 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:34:18 crc kubenswrapper[5102]: E0123 07:34:18.435749 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:34:33 crc kubenswrapper[5102]: I0123 07:34:33.597825 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:34:33 crc kubenswrapper[5102]: E0123 07:34:33.598707 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:34:47 crc kubenswrapper[5102]: I0123 07:34:47.598453 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:34:47 crc kubenswrapper[5102]: E0123 07:34:47.599348 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:35:00 crc kubenswrapper[5102]: I0123 07:35:00.598588 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:35:00 crc kubenswrapper[5102]: E0123 07:35:00.600318 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:35:11 crc kubenswrapper[5102]: I0123 07:35:11.598418 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:35:11 crc kubenswrapper[5102]: E0123 07:35:11.599738 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:35:23 crc kubenswrapper[5102]: I0123 07:35:23.598999 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:35:23 crc kubenswrapper[5102]: E0123 07:35:23.600136 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:35:34 crc kubenswrapper[5102]: I0123 07:35:34.598467 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:35:34 crc kubenswrapper[5102]: E0123 07:35:34.599317 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:35:47 crc kubenswrapper[5102]: I0123 07:35:47.598986 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:35:47 crc kubenswrapper[5102]: E0123 07:35:47.600223 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:36:02 crc kubenswrapper[5102]: I0123 07:36:02.598010 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:36:02 crc kubenswrapper[5102]: E0123 07:36:02.598859 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:36:17 crc kubenswrapper[5102]: I0123 07:36:17.598891 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:36:17 crc kubenswrapper[5102]: E0123 07:36:17.599923 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:36:28 crc kubenswrapper[5102]: I0123 07:36:28.598399 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:36:28 crc kubenswrapper[5102]: E0123 07:36:28.599509 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:36:42 crc kubenswrapper[5102]: I0123 07:36:42.597792 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:36:42 crc kubenswrapper[5102]: E0123 07:36:42.598997 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:36:54 crc kubenswrapper[5102]: I0123 07:36:54.599173 5102 scope.go:117] "RemoveContainer" 
containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:36:54 crc kubenswrapper[5102]: E0123 07:36:54.600478 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:37:05 crc kubenswrapper[5102]: I0123 07:37:05.598107 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:37:05 crc kubenswrapper[5102]: E0123 07:37:05.598907 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:37:16 crc kubenswrapper[5102]: I0123 07:37:16.598408 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:37:16 crc kubenswrapper[5102]: E0123 07:37:16.600378 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:37:30 crc kubenswrapper[5102]: I0123 07:37:30.598386 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:37:30 crc kubenswrapper[5102]: E0123 07:37:30.599344 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:37:43 crc kubenswrapper[5102]: I0123 07:37:43.598957 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:37:43 crc kubenswrapper[5102]: E0123 07:37:43.599732 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:37:56 crc kubenswrapper[5102]: I0123 07:37:56.599071 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:37:56 crc kubenswrapper[5102]: E0123 07:37:56.600114 5102 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:38:09 crc kubenswrapper[5102]: I0123 07:38:09.603204 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:38:09 crc kubenswrapper[5102]: E0123 07:38:09.603998 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:38:23 crc kubenswrapper[5102]: I0123 07:38:23.598050 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:38:23 crc kubenswrapper[5102]: E0123 07:38:23.599023 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:38:37 crc kubenswrapper[5102]: I0123 07:38:37.598748 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:38:37 crc kubenswrapper[5102]: E0123 07:38:37.599518 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:38:48 crc kubenswrapper[5102]: I0123 07:38:48.598974 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:38:48 crc kubenswrapper[5102]: E0123 07:38:48.600187 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:38:59 crc kubenswrapper[5102]: I0123 07:38:59.608069 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:38:59 crc kubenswrapper[5102]: E0123 07:38:59.609115 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:39:14 crc kubenswrapper[5102]: I0123 07:39:14.598706 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:39:14 crc kubenswrapper[5102]: E0123 07:39:14.599388 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:39:29 crc kubenswrapper[5102]: I0123 07:39:29.602949 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:39:30 crc kubenswrapper[5102]: I0123 07:39:30.078791 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63"} Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.330255 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331175 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331191 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331213 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="extract-utilities" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331220 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="extract-utilities" Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331247 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="extract-utilities" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331255 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="extract-utilities" Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331267 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="extract-content" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331274 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="extract-content" Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331281 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="extract-content" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331288 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" 
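[Editor's note: every sync attempt between the kill at 07:34:17 and the successful restart at 07:39:29 is rejected with "back-off 5m0s". Kubelet's CrashLoopBackOff delay starts at 10s and doubles per restart up to a 5m cap, so a container this deep into the loop waits the full 5 minutes; the ~5m12s gap observed here is that cap plus sync-loop retry slack. A Go sketch of the schedule, with the 10s/2x/5m constants stated as kubelet defaults rather than read from this log:]

package main

import (
	"fmt"
	"time"
)

// crashLoopDelay computes the CrashLoopBackOff schedule: 10s base,
// doubling per restart, capped at 5m (the "back-off 5m0s" above is
// this cap).
func crashLoopDelay(restarts int) time.Duration {
	d := 10 * time.Second
	for i := 0; i < restarts; i++ {
		d *= 2
		if d >= 5*time.Minute {
			return 5 * time.Minute
		}
	}
	return d
}

func main() {
	// Prints 10s, 20s, 40s, 1m20s, 2m40s, then 5m from the 5th restart
	// on -- matching the wait observed here between 07:34:17 and 07:39:29.
	for r := 0; r <= 6; r++ {
		fmt.Printf("restart %d -> wait %v\n", r, crashLoopDelay(r))
	}
}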
containerName="extract-content" Jan 23 07:40:51 crc kubenswrapper[5102]: E0123 07:40:51.331305 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331312 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331464 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="cef9226e-90d8-4b40-9297-7c75ecf130a9" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.331477 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="da557099-0364-4c8f-9d82-18cb4c648aed" containerName="registry-server" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.332742 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.350092 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.500481 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.500618 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.500713 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6l9x7\" (UniqueName: \"kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.603663 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.603711 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.603769 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6l9x7\" (UniqueName: \"kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7\") pod \"community-operators-nstsn\" (UID: 
\"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.604195 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.604279 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.632907 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6l9x7\" (UniqueName: \"kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7\") pod \"community-operators-nstsn\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") " pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:51 crc kubenswrapper[5102]: I0123 07:40:51.655144 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:40:52 crc kubenswrapper[5102]: I0123 07:40:52.231765 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:40:52 crc kubenswrapper[5102]: I0123 07:40:52.847078 5102 generic.go:334] "Generic (PLEG): container finished" podID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerID="408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134" exitCode=0 Jan 23 07:40:52 crc kubenswrapper[5102]: I0123 07:40:52.847129 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerDied","Data":"408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134"} Jan 23 07:40:52 crc kubenswrapper[5102]: I0123 07:40:52.847157 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerStarted","Data":"90e29dc924158ba6ae0706a0d1323f867da7b1e0e74de770e3c5904b03d00ef7"} Jan 23 07:40:52 crc kubenswrapper[5102]: I0123 07:40:52.849255 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 07:40:54 crc kubenswrapper[5102]: I0123 07:40:54.866724 5102 generic.go:334] "Generic (PLEG): container finished" podID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerID="90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f" exitCode=0 Jan 23 07:40:54 crc kubenswrapper[5102]: I0123 07:40:54.866789 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerDied","Data":"90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f"} Jan 23 07:41:00 crc kubenswrapper[5102]: I0123 07:41:00.917348 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" 
event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerStarted","Data":"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73"} Jan 23 07:41:00 crc kubenswrapper[5102]: I0123 07:41:00.944143 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-nstsn" podStartSLOduration=3.926065404 podStartE2EDuration="9.944117998s" podCreationTimestamp="2026-01-23 07:40:51 +0000 UTC" firstStartedPulling="2026-01-23 07:40:52.848989969 +0000 UTC m=+2803.669338964" lastFinishedPulling="2026-01-23 07:40:58.867042543 +0000 UTC m=+2809.687391558" observedRunningTime="2026-01-23 07:41:00.938519538 +0000 UTC m=+2811.758868523" watchObservedRunningTime="2026-01-23 07:41:00.944117998 +0000 UTC m=+2811.764467003" Jan 23 07:41:01 crc kubenswrapper[5102]: I0123 07:41:01.655877 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:41:01 crc kubenswrapper[5102]: I0123 07:41:01.655946 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:41:02 crc kubenswrapper[5102]: I0123 07:41:02.698973 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/community-operators-nstsn" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="registry-server" probeResult="failure" output=< Jan 23 07:41:02 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 07:41:02 crc kubenswrapper[5102]: > Jan 23 07:41:11 crc kubenswrapper[5102]: I0123 07:41:11.703378 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:41:11 crc kubenswrapper[5102]: I0123 07:41:11.789424 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:41:11 crc kubenswrapper[5102]: I0123 07:41:11.959567 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.053471 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-nstsn" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="registry-server" containerID="cri-o://7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73" gracePeriod=2 Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.539648 5102 util.go:48] "No ready sandbox for pod can be found. 
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.539648 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-nstsn"
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.699383 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6l9x7\" (UniqueName: \"kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7\") pod \"099349a9-aeb0-4c5d-b1a7-25519bc08836\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") "
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.699686 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content\") pod \"099349a9-aeb0-4c5d-b1a7-25519bc08836\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") "
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.699761 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities\") pod \"099349a9-aeb0-4c5d-b1a7-25519bc08836\" (UID: \"099349a9-aeb0-4c5d-b1a7-25519bc08836\") "
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.701068 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities" (OuterVolumeSpecName: "utilities") pod "099349a9-aeb0-4c5d-b1a7-25519bc08836" (UID: "099349a9-aeb0-4c5d-b1a7-25519bc08836"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.714801 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7" (OuterVolumeSpecName: "kube-api-access-6l9x7") pod "099349a9-aeb0-4c5d-b1a7-25519bc08836" (UID: "099349a9-aeb0-4c5d-b1a7-25519bc08836"). InnerVolumeSpecName "kube-api-access-6l9x7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.779483 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "099349a9-aeb0-4c5d-b1a7-25519bc08836" (UID: "099349a9-aeb0-4c5d-b1a7-25519bc08836"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.802129 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.802198 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/099349a9-aeb0-4c5d-b1a7-25519bc08836-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:13 crc kubenswrapper[5102]: I0123 07:41:13.802217 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6l9x7\" (UniqueName: \"kubernetes.io/projected/099349a9-aeb0-4c5d-b1a7-25519bc08836-kube-api-access-6l9x7\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.063881 5102 generic.go:334] "Generic (PLEG): container finished" podID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerID="7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73" exitCode=0 Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.063932 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerDied","Data":"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73"} Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.063968 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-nstsn" event={"ID":"099349a9-aeb0-4c5d-b1a7-25519bc08836","Type":"ContainerDied","Data":"90e29dc924158ba6ae0706a0d1323f867da7b1e0e74de770e3c5904b03d00ef7"} Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.063988 5102 scope.go:117] "RemoveContainer" containerID="7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.063989 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-nstsn" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.090396 5102 scope.go:117] "RemoveContainer" containerID="90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.115667 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.124323 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-nstsn"] Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.124995 5102 scope.go:117] "RemoveContainer" containerID="408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.150568 5102 scope.go:117] "RemoveContainer" containerID="7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73" Jan 23 07:41:14 crc kubenswrapper[5102]: E0123 07:41:14.151284 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73\": container with ID starting with 7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73 not found: ID does not exist" containerID="7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.151343 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73"} err="failed to get container status \"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73\": rpc error: code = NotFound desc = could not find container \"7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73\": container with ID starting with 7edfafcb6682483a86e5ee28d694c2e61a14530adec82e6f47432c78eaa03b73 not found: ID does not exist" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.151401 5102 scope.go:117] "RemoveContainer" containerID="90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f" Jan 23 07:41:14 crc kubenswrapper[5102]: E0123 07:41:14.151963 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f\": container with ID starting with 90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f not found: ID does not exist" containerID="90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.151992 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f"} err="failed to get container status \"90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f\": rpc error: code = NotFound desc = could not find container \"90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f\": container with ID starting with 90a6f4ce0935e40ccc0da1cc538f1722ded522abc1e77a212707bd5072f1338f not found: ID does not exist" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.152015 5102 scope.go:117] "RemoveContainer" containerID="408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134" Jan 23 07:41:14 crc kubenswrapper[5102]: E0123 07:41:14.152313 5102 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134\": container with ID starting with 408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134 not found: ID does not exist" containerID="408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134" Jan 23 07:41:14 crc kubenswrapper[5102]: I0123 07:41:14.152349 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134"} err="failed to get container status \"408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134\": rpc error: code = NotFound desc = could not find container \"408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134\": container with ID starting with 408b5624020ce6b0f034f94867787138eba10cf07740113ce51b2985e22eb134 not found: ID does not exist" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.164897 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-759n4"] Jan 23 07:41:15 crc kubenswrapper[5102]: E0123 07:41:15.165281 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="extract-content" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.165304 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="extract-content" Jan 23 07:41:15 crc kubenswrapper[5102]: E0123 07:41:15.165329 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="registry-server" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.165339 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="registry-server" Jan 23 07:41:15 crc kubenswrapper[5102]: E0123 07:41:15.165358 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="extract-utilities" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.165382 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="extract-utilities" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.165606 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" containerName="registry-server" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.166935 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.166935 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.194874 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-759n4"]
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.326330 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5fvqb\" (UniqueName: \"kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.326405 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.326437 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.428264 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.428359 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.428494 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5fvqb\" (UniqueName: \"kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.428865 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.428887 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.473024 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-5fvqb\" (UniqueName: \"kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb\") pod \"certified-operators-759n4\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") " pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.495786 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.607142 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="099349a9-aeb0-4c5d-b1a7-25519bc08836" path="/var/lib/kubelet/pods/099349a9-aeb0-4c5d-b1a7-25519bc08836/volumes" Jan 23 07:41:15 crc kubenswrapper[5102]: I0123 07:41:15.956676 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-759n4"] Jan 23 07:41:16 crc kubenswrapper[5102]: I0123 07:41:16.080873 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerStarted","Data":"9d9248cb889a96f395b392b8ddbc62db8e4fac1a7a3d0dacc4819bd1649c62d9"} Jan 23 07:41:17 crc kubenswrapper[5102]: I0123 07:41:17.093460 5102 generic.go:334] "Generic (PLEG): container finished" podID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerID="eb2e671d2261d2c7a4620779b0d556716c35a0c45792afff9783724719796320" exitCode=0 Jan 23 07:41:17 crc kubenswrapper[5102]: I0123 07:41:17.093516 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerDied","Data":"eb2e671d2261d2c7a4620779b0d556716c35a0c45792afff9783724719796320"} Jan 23 07:41:20 crc kubenswrapper[5102]: I0123 07:41:20.124829 5102 generic.go:334] "Generic (PLEG): container finished" podID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerID="c94dcb4c5e314a44226d21e6423d929f074e6fd3fd2a05bf5dabe10586fe6319" exitCode=0 Jan 23 07:41:20 crc kubenswrapper[5102]: I0123 07:41:20.124905 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerDied","Data":"c94dcb4c5e314a44226d21e6423d929f074e6fd3fd2a05bf5dabe10586fe6319"} Jan 23 07:41:22 crc kubenswrapper[5102]: I0123 07:41:22.146260 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerStarted","Data":"ea23913a0a5f63acd06ae10fdfd0a94478059ec7629c4beaf0d155788ac586bd"} Jan 23 07:41:22 crc kubenswrapper[5102]: I0123 07:41:22.169017 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-759n4" podStartSLOduration=3.49026765 podStartE2EDuration="7.168995702s" podCreationTimestamp="2026-01-23 07:41:15 +0000 UTC" firstStartedPulling="2026-01-23 07:41:17.09635203 +0000 UTC m=+2827.916701035" lastFinishedPulling="2026-01-23 07:41:20.775080072 +0000 UTC m=+2831.595429087" observedRunningTime="2026-01-23 07:41:22.168467116 +0000 UTC m=+2832.988816131" watchObservedRunningTime="2026-01-23 07:41:22.168995702 +0000 UTC m=+2832.989344687" Jan 23 07:41:25 crc kubenswrapper[5102]: I0123 07:41:25.496666 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-759n4" Jan 23 
07:41:25 crc kubenswrapper[5102]: I0123 07:41:25.497813 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:25 crc kubenswrapper[5102]: I0123 07:41:25.575852 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:26 crc kubenswrapper[5102]: I0123 07:41:26.253593 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:26 crc kubenswrapper[5102]: I0123 07:41:26.432375 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-759n4"] Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.452787 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.456431 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.473956 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.628766 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4d68f\" (UniqueName: \"kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.628838 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.628900 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.730505 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.731771 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4d68f\" (UniqueName: \"kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.732291 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities\") pod 
\"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.733036 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.733691 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.755255 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4d68f\" (UniqueName: \"kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f\") pod \"redhat-marketplace-kzpkx\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:27 crc kubenswrapper[5102]: I0123 07:41:27.780939 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:28 crc kubenswrapper[5102]: I0123 07:41:28.073901 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:28 crc kubenswrapper[5102]: I0123 07:41:28.213956 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerStarted","Data":"95e2405c538c614ffbfece381c3f5cea23a91b87a3e0962a96a5ac0a1a6b297c"} Jan 23 07:41:28 crc kubenswrapper[5102]: I0123 07:41:28.214086 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-759n4" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="registry-server" containerID="cri-o://ea23913a0a5f63acd06ae10fdfd0a94478059ec7629c4beaf0d155788ac586bd" gracePeriod=2 Jan 23 07:41:29 crc kubenswrapper[5102]: I0123 07:41:29.223223 5102 generic.go:334] "Generic (PLEG): container finished" podID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerID="ea23913a0a5f63acd06ae10fdfd0a94478059ec7629c4beaf0d155788ac586bd" exitCode=0 Jan 23 07:41:29 crc kubenswrapper[5102]: I0123 07:41:29.223283 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerDied","Data":"ea23913a0a5f63acd06ae10fdfd0a94478059ec7629c4beaf0d155788ac586bd"} Jan 23 07:41:29 crc kubenswrapper[5102]: I0123 07:41:29.225458 5102 generic.go:334] "Generic (PLEG): container finished" podID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerID="f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31" exitCode=0 Jan 23 07:41:29 crc kubenswrapper[5102]: I0123 07:41:29.225498 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerDied","Data":"f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31"} Jan 23 07:41:30 crc 
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.004524 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-759n4"
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.173093 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities\") pod \"b2b3670f-1e95-4a13-928f-96ea5af6581b\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") "
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.173163 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content\") pod \"b2b3670f-1e95-4a13-928f-96ea5af6581b\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") "
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.173222 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5fvqb\" (UniqueName: \"kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb\") pod \"b2b3670f-1e95-4a13-928f-96ea5af6581b\" (UID: \"b2b3670f-1e95-4a13-928f-96ea5af6581b\") "
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.174914 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities" (OuterVolumeSpecName: "utilities") pod "b2b3670f-1e95-4a13-928f-96ea5af6581b" (UID: "b2b3670f-1e95-4a13-928f-96ea5af6581b"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.178189 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb" (OuterVolumeSpecName: "kube-api-access-5fvqb") pod "b2b3670f-1e95-4a13-928f-96ea5af6581b" (UID: "b2b3670f-1e95-4a13-928f-96ea5af6581b"). InnerVolumeSpecName "kube-api-access-5fvqb". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.236244 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-759n4" event={"ID":"b2b3670f-1e95-4a13-928f-96ea5af6581b","Type":"ContainerDied","Data":"9d9248cb889a96f395b392b8ddbc62db8e4fac1a7a3d0dacc4819bd1649c62d9"}
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.236295 5102 scope.go:117] "RemoveContainer" containerID="ea23913a0a5f63acd06ae10fdfd0a94478059ec7629c4beaf0d155788ac586bd"
Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.236411 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-759n4" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.275620 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.275680 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5fvqb\" (UniqueName: \"kubernetes.io/projected/b2b3670f-1e95-4a13-928f-96ea5af6581b-kube-api-access-5fvqb\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.303793 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2b3670f-1e95-4a13-928f-96ea5af6581b" (UID: "b2b3670f-1e95-4a13-928f-96ea5af6581b"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.377520 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2b3670f-1e95-4a13-928f-96ea5af6581b-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.604636 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-759n4"] Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.664979 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-759n4"] Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.665853 5102 scope.go:117] "RemoveContainer" containerID="c94dcb4c5e314a44226d21e6423d929f074e6fd3fd2a05bf5dabe10586fe6319" Jan 23 07:41:30 crc kubenswrapper[5102]: I0123 07:41:30.752101 5102 scope.go:117] "RemoveContainer" containerID="eb2e671d2261d2c7a4620779b0d556716c35a0c45792afff9783724719796320" Jan 23 07:41:31 crc kubenswrapper[5102]: I0123 07:41:31.248976 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerStarted","Data":"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a"} Jan 23 07:41:31 crc kubenswrapper[5102]: I0123 07:41:31.615146 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" path="/var/lib/kubelet/pods/b2b3670f-1e95-4a13-928f-96ea5af6581b/volumes" Jan 23 07:41:32 crc kubenswrapper[5102]: I0123 07:41:32.257072 5102 generic.go:334] "Generic (PLEG): container finished" podID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerID="3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a" exitCode=0 Jan 23 07:41:32 crc kubenswrapper[5102]: I0123 07:41:32.257134 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerDied","Data":"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a"} Jan 23 07:41:33 crc kubenswrapper[5102]: I0123 07:41:33.265077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerStarted","Data":"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e"} Jan 23 07:41:33 crc kubenswrapper[5102]: 
I0123 07:41:33.288736 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-kzpkx" podStartSLOduration=2.773023926 podStartE2EDuration="6.288719301s" podCreationTimestamp="2026-01-23 07:41:27 +0000 UTC" firstStartedPulling="2026-01-23 07:41:29.227247649 +0000 UTC m=+2840.047596634" lastFinishedPulling="2026-01-23 07:41:32.742943024 +0000 UTC m=+2843.563292009" observedRunningTime="2026-01-23 07:41:33.282275645 +0000 UTC m=+2844.102624610" watchObservedRunningTime="2026-01-23 07:41:33.288719301 +0000 UTC m=+2844.109068276" Jan 23 07:41:37 crc kubenswrapper[5102]: I0123 07:41:37.782082 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:37 crc kubenswrapper[5102]: I0123 07:41:37.782415 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:37 crc kubenswrapper[5102]: I0123 07:41:37.835945 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:38 crc kubenswrapper[5102]: I0123 07:41:38.361756 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:38 crc kubenswrapper[5102]: I0123 07:41:38.448038 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.319631 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-kzpkx" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="registry-server" containerID="cri-o://3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e" gracePeriod=2 Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.792660 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.891359 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities\") pod \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.891449 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d68f\" (UniqueName: \"kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f\") pod \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.891601 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content\") pod \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\" (UID: \"f2aa16f0-68bc-4c84-a99b-f577b266bd4e\") " Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.892928 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities" (OuterVolumeSpecName: "utilities") pod "f2aa16f0-68bc-4c84-a99b-f577b266bd4e" (UID: "f2aa16f0-68bc-4c84-a99b-f577b266bd4e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.897033 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f" (OuterVolumeSpecName: "kube-api-access-4d68f") pod "f2aa16f0-68bc-4c84-a99b-f577b266bd4e" (UID: "f2aa16f0-68bc-4c84-a99b-f577b266bd4e"). InnerVolumeSpecName "kube-api-access-4d68f". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.918227 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "f2aa16f0-68bc-4c84-a99b-f577b266bd4e" (UID: "f2aa16f0-68bc-4c84-a99b-f577b266bd4e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.993358 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.993408 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:40 crc kubenswrapper[5102]: I0123 07:41:40.993423 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d68f\" (UniqueName: \"kubernetes.io/projected/f2aa16f0-68bc-4c84-a99b-f577b266bd4e-kube-api-access-4d68f\") on node \"crc\" DevicePath \"\"" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.422730 5102 generic.go:334] "Generic (PLEG): container finished" podID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerID="3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e" exitCode=0 Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.422785 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerDied","Data":"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e"} Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.422819 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-kzpkx" event={"ID":"f2aa16f0-68bc-4c84-a99b-f577b266bd4e","Type":"ContainerDied","Data":"95e2405c538c614ffbfece381c3f5cea23a91b87a3e0962a96a5ac0a1a6b297c"} Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.422842 5102 scope.go:117] "RemoveContainer" containerID="3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.423003 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-kzpkx" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.485037 5102 scope.go:117] "RemoveContainer" containerID="3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.501616 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.517010 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-kzpkx"] Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.524355 5102 scope.go:117] "RemoveContainer" containerID="f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.543996 5102 scope.go:117] "RemoveContainer" containerID="3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e" Jan 23 07:41:41 crc kubenswrapper[5102]: E0123 07:41:41.544309 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e\": container with ID starting with 3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e not found: ID does not exist" containerID="3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.544343 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e"} err="failed to get container status \"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e\": rpc error: code = NotFound desc = could not find container \"3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e\": container with ID starting with 3ea2e68b1dd1e593bfc0171d99b9c184316d9b7f72d538e5dcfd8a9f7365330e not found: ID does not exist" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.544363 5102 scope.go:117] "RemoveContainer" containerID="3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a" Jan 23 07:41:41 crc kubenswrapper[5102]: E0123 07:41:41.544596 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a\": container with ID starting with 3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a not found: ID does not exist" containerID="3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.544631 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a"} err="failed to get container status \"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a\": rpc error: code = NotFound desc = could not find container \"3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a\": container with ID starting with 3b155833d9c0d895f6a2ea650b7dd2eca2aabddcf5e07b44aa35dfe700865d0a not found: ID does not exist" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.544644 5102 scope.go:117] "RemoveContainer" containerID="f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31" Jan 23 07:41:41 crc kubenswrapper[5102]: E0123 07:41:41.544864 5102 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31\": container with ID starting with f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31 not found: ID does not exist" containerID="f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.544894 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31"} err="failed to get container status \"f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31\": rpc error: code = NotFound desc = could not find container \"f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31\": container with ID starting with f3fc877bfc44c42021120a2b4152475f64669c9989f5ec50faaca32780bdef31 not found: ID does not exist" Jan 23 07:41:41 crc kubenswrapper[5102]: I0123 07:41:41.605679 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" path="/var/lib/kubelet/pods/f2aa16f0-68bc-4c84-a99b-f577b266bd4e/volumes" Jan 23 07:41:46 crc kubenswrapper[5102]: I0123 07:41:46.767868 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:41:46 crc kubenswrapper[5102]: I0123 07:41:46.768362 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:42:16 crc kubenswrapper[5102]: I0123 07:42:16.768587 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:42:16 crc kubenswrapper[5102]: I0123 07:42:16.769238 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:42:46 crc kubenswrapper[5102]: I0123 07:42:46.768503 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:42:46 crc kubenswrapper[5102]: I0123 07:42:46.769370 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:42:46 crc kubenswrapper[5102]: I0123 07:42:46.769460 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" 
status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:42:46 crc kubenswrapper[5102]: I0123 07:42:46.770761 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:42:46 crc kubenswrapper[5102]: I0123 07:42:46.770928 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63" gracePeriod=600 Jan 23 07:42:47 crc kubenswrapper[5102]: I0123 07:42:47.063974 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63" exitCode=0 Jan 23 07:42:47 crc kubenswrapper[5102]: I0123 07:42:47.064019 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63"} Jan 23 07:42:47 crc kubenswrapper[5102]: I0123 07:42:47.064300 5102 scope.go:117] "RemoveContainer" containerID="46d4da423158164509ccc8e1efb8318efc0d1c89d4214c01556eb8b65fc56337" Jan 23 07:42:48 crc kubenswrapper[5102]: I0123 07:42:48.077695 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f"} Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.059706 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"] Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.060992 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="extract-utilities" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061030 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="extract-utilities" Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.061063 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="extract-utilities" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061081 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="extract-utilities" Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.061109 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061125 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.061157 5102 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061169 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.061196 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="extract-content" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061212 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="extract-content" Jan 23 07:43:05 crc kubenswrapper[5102]: E0123 07:43:05.061245 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="extract-content" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.061261 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="extract-content" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.064720 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b2b3670f-1e95-4a13-928f-96ea5af6581b" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.064791 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="f2aa16f0-68bc-4c84-a99b-f577b266bd4e" containerName="registry-server" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.078046 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.078929 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"] Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.257111 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bq7jg\" (UniqueName: \"kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.258061 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.258099 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.359426 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.359566 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-bq7jg\" (UniqueName: \"kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.359651 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.360061 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.360140 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.378692 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bq7jg\" (UniqueName: \"kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg\") pod \"redhat-operators-pqhts\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.435818 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pqhts"
Jan 23 07:43:05 crc kubenswrapper[5102]: I0123 07:43:05.855208 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"]
Jan 23 07:43:05 crc kubenswrapper[5102]: W0123 07:43:05.861783 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd2458172_918c_40b3_a6f3_ecb257abbd55.slice/crio-545fb1edecfbcadf2877c4c290f133c679596b8fb63ac14cd1603968e7b62e97 WatchSource:0}: Error finding container 545fb1edecfbcadf2877c4c290f133c679596b8fb63ac14cd1603968e7b62e97: Status 404 returned error can't find the container with id 545fb1edecfbcadf2877c4c290f133c679596b8fb63ac14cd1603968e7b62e97
Jan 23 07:43:06 crc kubenswrapper[5102]: I0123 07:43:06.261224 5102 generic.go:334] "Generic (PLEG): container finished" podID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerID="7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb" exitCode=0
Jan 23 07:43:06 crc kubenswrapper[5102]: I0123 07:43:06.261577 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerDied","Data":"7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb"}
Jan 23 07:43:06 crc kubenswrapper[5102]: I0123 07:43:06.261611 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerStarted","Data":"545fb1edecfbcadf2877c4c290f133c679596b8fb63ac14cd1603968e7b62e97"}
Jan 23 07:43:08 crc kubenswrapper[5102]: I0123 07:43:08.282910 5102 generic.go:334] "Generic (PLEG): container finished" podID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerID="bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487" exitCode=0
Jan 23 07:43:08 crc kubenswrapper[5102]: I0123 07:43:08.282965 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerDied","Data":"bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487"}
Jan 23 07:43:09 crc kubenswrapper[5102]: I0123 07:43:09.295506 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerStarted","Data":"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b"}
Jan 23 07:43:09 crc kubenswrapper[5102]: I0123 07:43:09.322598 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-pqhts" podStartSLOduration=1.8297533769999998 podStartE2EDuration="4.322567815s" podCreationTimestamp="2026-01-23 07:43:05 +0000 UTC" firstStartedPulling="2026-01-23 07:43:06.263336291 +0000 UTC m=+2937.083685286" lastFinishedPulling="2026-01-23 07:43:08.756150739 +0000 UTC m=+2939.576499724" observedRunningTime="2026-01-23 07:43:09.319193442 +0000 UTC m=+2940.139542407" watchObservedRunningTime="2026-01-23 07:43:09.322567815 +0000 UTC m=+2940.142916790"
Jan 23 07:43:15 crc kubenswrapper[5102]: I0123 07:43:15.436916 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-pqhts"
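[editor's note] The startup probe whose "unhealthy" flip follows (with its output logged at 07:43:16) is the same check seen earlier at 07:41:02: connect to the registry-server's gRPC endpoint on :50051 within 1s and query its health service. A hedged Go sketch of such a check, assuming the stock grpc_health_v1 service; the actual probe binary inside these catalog images is not shown in the log.

package main

import (
	"context"
	"fmt"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// probe dials the registry-server's gRPC port with a 1s budget and queries
// the standard health service. Until the catalog has loaded, the dial times
// out, producing failures like the ":50051 within 1s" output in this log.
func probe(addr string) error {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	conn, err := grpc.DialContext(ctx, addr,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithBlock())
	if err != nil {
		return fmt.Errorf("timeout: failed to connect service %q within 1s: %w", addr, err)
	}
	defer conn.Close()
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{})
	if err != nil {
		return err
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		return fmt.Errorf("service %v is not SERVING", resp.Status)
	}
	return nil
}

func main() {
	fmt.Println(probe(":50051"))
}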
pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:16 crc kubenswrapper[5102]: I0123 07:43:16.493771 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-pqhts" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="registry-server" probeResult="failure" output=< Jan 23 07:43:16 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 07:43:16 crc kubenswrapper[5102]: > Jan 23 07:43:25 crc kubenswrapper[5102]: I0123 07:43:25.501452 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:25 crc kubenswrapper[5102]: I0123 07:43:25.578156 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:25 crc kubenswrapper[5102]: I0123 07:43:25.748360 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"] Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.461226 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-pqhts" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="registry-server" containerID="cri-o://96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b" gracePeriod=2 Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.899870 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.947347 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content\") pod \"d2458172-918c-40b3-a6f3-ecb257abbd55\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.947464 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities\") pod \"d2458172-918c-40b3-a6f3-ecb257abbd55\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.947531 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bq7jg\" (UniqueName: \"kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg\") pod \"d2458172-918c-40b3-a6f3-ecb257abbd55\" (UID: \"d2458172-918c-40b3-a6f3-ecb257abbd55\") " Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.948604 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities" (OuterVolumeSpecName: "utilities") pod "d2458172-918c-40b3-a6f3-ecb257abbd55" (UID: "d2458172-918c-40b3-a6f3-ecb257abbd55"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.948810 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:43:27 crc kubenswrapper[5102]: I0123 07:43:27.954371 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg" (OuterVolumeSpecName: "kube-api-access-bq7jg") pod "d2458172-918c-40b3-a6f3-ecb257abbd55" (UID: "d2458172-918c-40b3-a6f3-ecb257abbd55"). InnerVolumeSpecName "kube-api-access-bq7jg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.050377 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bq7jg\" (UniqueName: \"kubernetes.io/projected/d2458172-918c-40b3-a6f3-ecb257abbd55-kube-api-access-bq7jg\") on node \"crc\" DevicePath \"\"" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.102803 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "d2458172-918c-40b3-a6f3-ecb257abbd55" (UID: "d2458172-918c-40b3-a6f3-ecb257abbd55"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.151990 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/d2458172-918c-40b3-a6f3-ecb257abbd55-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.473693 5102 generic.go:334] "Generic (PLEG): container finished" podID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerID="96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b" exitCode=0 Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.473782 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-pqhts" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.473813 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerDied","Data":"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b"} Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.474007 5102 scope.go:117] "RemoveContainer" containerID="96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.473973 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-pqhts" event={"ID":"d2458172-918c-40b3-a6f3-ecb257abbd55","Type":"ContainerDied","Data":"545fb1edecfbcadf2877c4c290f133c679596b8fb63ac14cd1603968e7b62e97"} Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.500738 5102 scope.go:117] "RemoveContainer" containerID="bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.537867 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"] Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.544479 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-pqhts"] Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.573976 5102 scope.go:117] "RemoveContainer" containerID="7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.593390 5102 scope.go:117] "RemoveContainer" containerID="96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b" Jan 23 07:43:28 crc kubenswrapper[5102]: E0123 07:43:28.598226 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b\": container with ID starting with 96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b not found: ID does not exist" containerID="96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.598275 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b"} err="failed to get container status \"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b\": rpc error: code = NotFound desc = could not find container \"96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b\": container with ID starting with 96a037c26107e131db5c5e672aa965611e77103a09c68123a29117d8338a5f9b not found: ID does not exist" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.598309 5102 scope.go:117] "RemoveContainer" containerID="bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487" Jan 23 07:43:28 crc kubenswrapper[5102]: E0123 07:43:28.599336 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487\": container with ID starting with bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487 not found: ID does not exist" containerID="bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.599421 5102 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487"} err="failed to get container status \"bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487\": rpc error: code = NotFound desc = could not find container \"bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487\": container with ID starting with bf4ca92dd935e04ed8115cb8a33b63ab093fea8a5d4478f3c55eb19ee89c7487 not found: ID does not exist" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.599473 5102 scope.go:117] "RemoveContainer" containerID="7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb" Jan 23 07:43:28 crc kubenswrapper[5102]: E0123 07:43:28.603022 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb\": container with ID starting with 7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb not found: ID does not exist" containerID="7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb" Jan 23 07:43:28 crc kubenswrapper[5102]: I0123 07:43:28.603077 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb"} err="failed to get container status \"7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb\": rpc error: code = NotFound desc = could not find container \"7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb\": container with ID starting with 7cf0eea97e17fe59673df1814a367799aa0be331b064ad0a8ff45a5e4de8e0bb not found: ID does not exist" Jan 23 07:43:29 crc kubenswrapper[5102]: I0123 07:43:29.608129 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" path="/var/lib/kubelet/pods/d2458172-918c-40b3-a6f3-ecb257abbd55/volumes" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.161500 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8"] Jan 23 07:45:00 crc kubenswrapper[5102]: E0123 07:45:00.162532 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="extract-content" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.162644 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="extract-content" Jan 23 07:45:00 crc kubenswrapper[5102]: E0123 07:45:00.162670 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="extract-utilities" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.162680 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="extract-utilities" Jan 23 07:45:00 crc kubenswrapper[5102]: E0123 07:45:00.162717 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="registry-server" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.162728 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="registry-server" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.162930 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="d2458172-918c-40b3-a6f3-ecb257abbd55" containerName="registry-server" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.163677 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.167189 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.175040 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.182452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8"] Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.200707 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.200766 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.200827 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qs7s4\" (UniqueName: \"kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.301854 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qs7s4\" (UniqueName: \"kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.301990 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.302049 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.302955 
5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.307713 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.318782 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qs7s4\" (UniqueName: \"kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4\") pod \"collect-profiles-29485905-l9vx8\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.502798 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:00 crc kubenswrapper[5102]: I0123 07:45:00.947331 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8"] Jan 23 07:45:01 crc kubenswrapper[5102]: I0123 07:45:01.257377 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" event={"ID":"ead9d1be-80cc-4e9e-b413-9f9095d0483b","Type":"ContainerStarted","Data":"5435ec258553850903e880653a680a72480e70c428f7b284f43cf4d6794eaf86"} Jan 23 07:45:01 crc kubenswrapper[5102]: I0123 07:45:01.257427 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" event={"ID":"ead9d1be-80cc-4e9e-b413-9f9095d0483b","Type":"ContainerStarted","Data":"b8c857799f05ac4b42b286f6a8ec6c412ce3d540c794f5bd4a3824b09a0d1be4"} Jan 23 07:45:01 crc kubenswrapper[5102]: I0123 07:45:01.277482 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" podStartSLOduration=1.277463694 podStartE2EDuration="1.277463694s" podCreationTimestamp="2026-01-23 07:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 07:45:01.270341617 +0000 UTC m=+3052.090690612" watchObservedRunningTime="2026-01-23 07:45:01.277463694 +0000 UTC m=+3052.097812669" Jan 23 07:45:02 crc kubenswrapper[5102]: I0123 07:45:02.267318 5102 generic.go:334] "Generic (PLEG): container finished" podID="ead9d1be-80cc-4e9e-b413-9f9095d0483b" containerID="5435ec258553850903e880653a680a72480e70c428f7b284f43cf4d6794eaf86" exitCode=0 Jan 23 07:45:02 crc kubenswrapper[5102]: I0123 07:45:02.267700 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" event={"ID":"ead9d1be-80cc-4e9e-b413-9f9095d0483b","Type":"ContainerDied","Data":"5435ec258553850903e880653a680a72480e70c428f7b284f43cf4d6794eaf86"} Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.624087 5102 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.646601 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume\") pod \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.646874 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs7s4\" (UniqueName: \"kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4\") pod \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.646978 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume\") pod \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\" (UID: \"ead9d1be-80cc-4e9e-b413-9f9095d0483b\") " Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.647380 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume" (OuterVolumeSpecName: "config-volume") pod "ead9d1be-80cc-4e9e-b413-9f9095d0483b" (UID: "ead9d1be-80cc-4e9e-b413-9f9095d0483b"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.654579 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "ead9d1be-80cc-4e9e-b413-9f9095d0483b" (UID: "ead9d1be-80cc-4e9e-b413-9f9095d0483b"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.655300 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4" (OuterVolumeSpecName: "kube-api-access-qs7s4") pod "ead9d1be-80cc-4e9e-b413-9f9095d0483b" (UID: "ead9d1be-80cc-4e9e-b413-9f9095d0483b"). InnerVolumeSpecName "kube-api-access-qs7s4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.748479 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/ead9d1be-80cc-4e9e-b413-9f9095d0483b-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.748532 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs7s4\" (UniqueName: \"kubernetes.io/projected/ead9d1be-80cc-4e9e-b413-9f9095d0483b-kube-api-access-qs7s4\") on node \"crc\" DevicePath \"\"" Jan 23 07:45:03 crc kubenswrapper[5102]: I0123 07:45:03.748565 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/ead9d1be-80cc-4e9e-b413-9f9095d0483b-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 07:45:04 crc kubenswrapper[5102]: I0123 07:45:04.285533 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" event={"ID":"ead9d1be-80cc-4e9e-b413-9f9095d0483b","Type":"ContainerDied","Data":"b8c857799f05ac4b42b286f6a8ec6c412ce3d540c794f5bd4a3824b09a0d1be4"} Jan 23 07:45:04 crc kubenswrapper[5102]: I0123 07:45:04.285600 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8" Jan 23 07:45:04 crc kubenswrapper[5102]: I0123 07:45:04.285617 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b8c857799f05ac4b42b286f6a8ec6c412ce3d540c794f5bd4a3824b09a0d1be4" Jan 23 07:45:04 crc kubenswrapper[5102]: I0123 07:45:04.364379 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z"] Jan 23 07:45:04 crc kubenswrapper[5102]: I0123 07:45:04.375943 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485860-v568z"] Jan 23 07:45:05 crc kubenswrapper[5102]: I0123 07:45:05.613324 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="53cdfad3-0375-4019-bc82-0240a66527c6" path="/var/lib/kubelet/pods/53cdfad3-0375-4019-bc82-0240a66527c6/volumes" Jan 23 07:45:15 crc kubenswrapper[5102]: I0123 07:45:15.842327 5102 scope.go:117] "RemoveContainer" containerID="26596e9af76163e8a38826b5156c15696fccd899d182d740d83a3abb6eea15f7" Jan 23 07:45:16 crc kubenswrapper[5102]: I0123 07:45:16.768613 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:45:16 crc kubenswrapper[5102]: I0123 07:45:16.769068 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:45:46 crc kubenswrapper[5102]: I0123 07:45:46.768646 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 23 07:45:46 crc kubenswrapper[5102]: I0123 07:45:46.769270 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.768158 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.770016 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.770203 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.771028 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.771223 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" gracePeriod=600 Jan 23 07:46:16 crc kubenswrapper[5102]: E0123 07:46:16.913430 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.968038 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" exitCode=0 Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.968114 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f"} Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.968449 5102 scope.go:117] "RemoveContainer" containerID="a9cc721d80c7b6b8f98577ee94672d949fd5c14371a18ae1383e7f6ac5c86e63" Jan 23 07:46:16 crc kubenswrapper[5102]: I0123 07:46:16.969055 5102 scope.go:117] "RemoveContainer" 
containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:46:16 crc kubenswrapper[5102]: E0123 07:46:16.969415 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:46:28 crc kubenswrapper[5102]: I0123 07:46:28.598152 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:46:28 crc kubenswrapper[5102]: E0123 07:46:28.598838 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:46:42 crc kubenswrapper[5102]: I0123 07:46:42.597972 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:46:42 crc kubenswrapper[5102]: E0123 07:46:42.598693 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:46:56 crc kubenswrapper[5102]: I0123 07:46:56.598537 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:46:56 crc kubenswrapper[5102]: E0123 07:46:56.600509 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:47:10 crc kubenswrapper[5102]: I0123 07:47:10.598606 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:47:10 crc kubenswrapper[5102]: E0123 07:47:10.599800 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:47:23 crc kubenswrapper[5102]: I0123 07:47:23.598448 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:47:23 crc kubenswrapper[5102]: E0123 07:47:23.599593 5102 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:47:36 crc kubenswrapper[5102]: I0123 07:47:36.599058 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:47:36 crc kubenswrapper[5102]: E0123 07:47:36.600317 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:47:47 crc kubenswrapper[5102]: I0123 07:47:47.598313 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:47:47 crc kubenswrapper[5102]: E0123 07:47:47.599267 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:47:59 crc kubenswrapper[5102]: I0123 07:47:59.612674 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:47:59 crc kubenswrapper[5102]: E0123 07:47:59.613584 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:48:10 crc kubenswrapper[5102]: I0123 07:48:10.598975 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:48:10 crc kubenswrapper[5102]: E0123 07:48:10.600278 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:48:21 crc kubenswrapper[5102]: I0123 07:48:21.598298 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:48:21 crc kubenswrapper[5102]: E0123 07:48:21.598909 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:48:35 crc kubenswrapper[5102]: I0123 07:48:35.599297 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:48:35 crc kubenswrapper[5102]: E0123 07:48:35.600136 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:48:48 crc kubenswrapper[5102]: I0123 07:48:48.599203 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:48:48 crc kubenswrapper[5102]: E0123 07:48:48.600497 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:48:59 crc kubenswrapper[5102]: I0123 07:48:59.607232 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:48:59 crc kubenswrapper[5102]: E0123 07:48:59.608582 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:49:11 crc kubenswrapper[5102]: I0123 07:49:11.597942 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:49:11 crc kubenswrapper[5102]: E0123 07:49:11.598711 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:49:23 crc kubenswrapper[5102]: I0123 07:49:23.599921 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:49:23 crc kubenswrapper[5102]: E0123 07:49:23.600900 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:49:38 crc kubenswrapper[5102]: I0123 07:49:38.598359 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:49:38 crc kubenswrapper[5102]: E0123 07:49:38.599519 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:49:52 crc kubenswrapper[5102]: I0123 07:49:52.598157 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:49:52 crc kubenswrapper[5102]: E0123 07:49:52.598929 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:50:06 crc kubenswrapper[5102]: I0123 07:50:06.599002 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:50:06 crc kubenswrapper[5102]: E0123 07:50:06.599871 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:50:19 crc kubenswrapper[5102]: I0123 07:50:19.610598 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:50:19 crc kubenswrapper[5102]: E0123 07:50:19.612471 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:50:32 crc kubenswrapper[5102]: I0123 07:50:32.598363 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:50:32 crc kubenswrapper[5102]: E0123 07:50:32.599073 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:50:43 crc kubenswrapper[5102]: I0123 07:50:43.598587 5102 scope.go:117] "RemoveContainer" 
containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:50:43 crc kubenswrapper[5102]: E0123 07:50:43.599666 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:50:55 crc kubenswrapper[5102]: I0123 07:50:55.598952 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:50:55 crc kubenswrapper[5102]: E0123 07:50:55.599518 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:51:09 crc kubenswrapper[5102]: I0123 07:51:09.606047 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:51:09 crc kubenswrapper[5102]: E0123 07:51:09.606871 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 07:51:21 crc kubenswrapper[5102]: I0123 07:51:21.597888 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f" Jan 23 07:51:22 crc kubenswrapper[5102]: I0123 07:51:22.181202 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9"} Jan 23 07:51:44 crc kubenswrapper[5102]: I0123 07:51:44.926476 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-pwqds"] Jan 23 07:51:44 crc kubenswrapper[5102]: E0123 07:51:44.927987 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="ead9d1be-80cc-4e9e-b413-9f9095d0483b" containerName="collect-profiles" Jan 23 07:51:44 crc kubenswrapper[5102]: I0123 07:51:44.928022 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="ead9d1be-80cc-4e9e-b413-9f9095d0483b" containerName="collect-profiles" Jan 23 07:51:44 crc kubenswrapper[5102]: I0123 07:51:44.928319 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="ead9d1be-80cc-4e9e-b413-9f9095d0483b" containerName="collect-profiles" Jan 23 07:51:44 crc kubenswrapper[5102]: I0123 07:51:44.930290 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:44 crc kubenswrapper[5102]: I0123 07:51:44.945883 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pwqds"] Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.086299 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.086496 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-khng4\" (UniqueName: \"kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.086596 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.187617 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.187994 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.188161 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-khng4\" (UniqueName: \"kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.189135 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.189516 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.206708 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-khng4\" (UniqueName: \"kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4\") pod \"community-operators-pwqds\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") " pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.267639 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pwqds" Jan 23 07:51:45 crc kubenswrapper[5102]: I0123 07:51:45.828452 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-pwqds"] Jan 23 07:51:46 crc kubenswrapper[5102]: I0123 07:51:46.397107 5102 generic.go:334] "Generic (PLEG): container finished" podID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerID="b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d" exitCode=0 Jan 23 07:51:46 crc kubenswrapper[5102]: I0123 07:51:46.397253 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerDied","Data":"b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d"} Jan 23 07:51:46 crc kubenswrapper[5102]: I0123 07:51:46.397622 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerStarted","Data":"c19a3d8ff09e1a7a7785a28fa80ec3591ccb6f225634bd620416abd6b9cb28f9"} Jan 23 07:51:46 crc kubenswrapper[5102]: I0123 07:51:46.400246 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 07:51:47 crc kubenswrapper[5102]: I0123 07:51:47.415720 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerStarted","Data":"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"} Jan 23 07:51:48 crc kubenswrapper[5102]: I0123 07:51:48.424954 5102 generic.go:334] "Generic (PLEG): container finished" podID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerID="c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5" exitCode=0 Jan 23 07:51:48 crc kubenswrapper[5102]: I0123 07:51:48.424995 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerDied","Data":"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"} Jan 23 07:51:49 crc kubenswrapper[5102]: I0123 07:51:49.434911 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerStarted","Data":"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"} Jan 23 07:51:49 crc kubenswrapper[5102]: I0123 07:51:49.469275 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-pwqds" podStartSLOduration=2.991821231 podStartE2EDuration="5.469252742s" podCreationTimestamp="2026-01-23 07:51:44 +0000 UTC" firstStartedPulling="2026-01-23 07:51:46.399710114 +0000 UTC m=+3457.220059119" lastFinishedPulling="2026-01-23 07:51:48.877141615 +0000 UTC m=+3459.697490630" observedRunningTime="2026-01-23 07:51:49.460954117 +0000 UTC m=+3460.281303122" watchObservedRunningTime="2026-01-23 
Jan 23 07:51:55 crc kubenswrapper[5102]: I0123 07:51:55.269364 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:55 crc kubenswrapper[5102]: I0123 07:51:55.269973 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:55 crc kubenswrapper[5102]: I0123 07:51:55.328523 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:55 crc kubenswrapper[5102]: I0123 07:51:55.509018 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:55 crc kubenswrapper[5102]: I0123 07:51:55.571415 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pwqds"]
Jan 23 07:51:57 crc kubenswrapper[5102]: I0123 07:51:57.490234 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-pwqds" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="registry-server" containerID="cri-o://9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d" gracePeriod=2
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.074103 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.182533 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content\") pod \"7029efc0-e95a-4cb7-a549-3db92aac9081\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") "
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.182592 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities\") pod \"7029efc0-e95a-4cb7-a549-3db92aac9081\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") "
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.182614 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-khng4\" (UniqueName: \"kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4\") pod \"7029efc0-e95a-4cb7-a549-3db92aac9081\" (UID: \"7029efc0-e95a-4cb7-a549-3db92aac9081\") "
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.183366 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities" (OuterVolumeSpecName: "utilities") pod "7029efc0-e95a-4cb7-a549-3db92aac9081" (UID: "7029efc0-e95a-4cb7-a549-3db92aac9081"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.189437 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4" (OuterVolumeSpecName: "kube-api-access-khng4") pod "7029efc0-e95a-4cb7-a549-3db92aac9081" (UID: "7029efc0-e95a-4cb7-a549-3db92aac9081"). InnerVolumeSpecName "kube-api-access-khng4". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.235403 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "7029efc0-e95a-4cb7-a549-3db92aac9081" (UID: "7029efc0-e95a-4cb7-a549-3db92aac9081"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.283965 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-khng4\" (UniqueName: \"kubernetes.io/projected/7029efc0-e95a-4cb7-a549-3db92aac9081-kube-api-access-khng4\") on node \"crc\" DevicePath \"\""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.283999 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.284008 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/7029efc0-e95a-4cb7-a549-3db92aac9081-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.501361 5102 generic.go:334] "Generic (PLEG): container finished" podID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerID="9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d" exitCode=0
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.501435 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerDied","Data":"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"}
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.501486 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-pwqds"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.501510 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-pwqds" event={"ID":"7029efc0-e95a-4cb7-a549-3db92aac9081","Type":"ContainerDied","Data":"c19a3d8ff09e1a7a7785a28fa80ec3591ccb6f225634bd620416abd6b9cb28f9"}
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.501574 5102 scope.go:117] "RemoveContainer" containerID="9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.531174 5102 scope.go:117] "RemoveContainer" containerID="c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.554892 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-pwqds"]
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.560026 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-pwqds"]
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.578392 5102 scope.go:117] "RemoveContainer" containerID="b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.603016 5102 scope.go:117] "RemoveContainer" containerID="9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"
Jan 23 07:51:58 crc kubenswrapper[5102]: E0123 07:51:58.604579 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d\": container with ID starting with 9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d not found: ID does not exist" containerID="9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.604648 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d"} err="failed to get container status \"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d\": rpc error: code = NotFound desc = could not find container \"9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d\": container with ID starting with 9002c01401f22ae1d9743528c12a3c103a902bf45e574fa89a6c86faff1f748d not found: ID does not exist"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.604690 5102 scope.go:117] "RemoveContainer" containerID="c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"
Jan 23 07:51:58 crc kubenswrapper[5102]: E0123 07:51:58.605128 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5\": container with ID starting with c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5 not found: ID does not exist" containerID="c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"
Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.605171 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5"} err="failed to get container status \"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5\": rpc error: code = NotFound desc = could not find
container \"c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5\": container with ID starting with c0decb0407a30c5a77c963a7863df0c968a0f7d0bd030786502909ea87453eb5 not found: ID does not exist" Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.605199 5102 scope.go:117] "RemoveContainer" containerID="b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d" Jan 23 07:51:58 crc kubenswrapper[5102]: E0123 07:51:58.605847 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d\": container with ID starting with b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d not found: ID does not exist" containerID="b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d" Jan 23 07:51:58 crc kubenswrapper[5102]: I0123 07:51:58.605881 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d"} err="failed to get container status \"b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d\": rpc error: code = NotFound desc = could not find container \"b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d\": container with ID starting with b44cfb230e30c2eb2309a3b9341503ea9bed68c79f8fe971a9373a1e087e994d not found: ID does not exist" Jan 23 07:51:59 crc kubenswrapper[5102]: I0123 07:51:59.611145 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" path="/var/lib/kubelet/pods/7029efc0-e95a-4cb7-a549-3db92aac9081/volumes" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.875338 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:18 crc kubenswrapper[5102]: E0123 07:52:18.876305 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="extract-utilities" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.876473 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="extract-utilities" Jan 23 07:52:18 crc kubenswrapper[5102]: E0123 07:52:18.876523 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="registry-server" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.876561 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="registry-server" Jan 23 07:52:18 crc kubenswrapper[5102]: E0123 07:52:18.876607 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="extract-content" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.876622 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="extract-content" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.876898 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="7029efc0-e95a-4cb7-a549-3db92aac9081" containerName="registry-server" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.878734 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.892991 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.986200 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.986285 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cbpbd\" (UniqueName: \"kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:18 crc kubenswrapper[5102]: I0123 07:52:18.986346 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.087637 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.087708 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cbpbd\" (UniqueName: \"kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.087741 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.088283 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.088389 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.116381 5102 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-cbpbd\" (UniqueName: \"kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd\") pod \"redhat-marketplace-b9d2x\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.214156 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.727223 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:19 crc kubenswrapper[5102]: I0123 07:52:19.765436 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerStarted","Data":"d903ea1651c123dfe3d9ada8c780a795176b98a0aab4988f47df2212868dda2a"} Jan 23 07:52:20 crc kubenswrapper[5102]: I0123 07:52:20.777181 5102 generic.go:334] "Generic (PLEG): container finished" podID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerID="54371bb0d48d82a11e5ed9023a39647d95f9c5635a79902872ef7c43f7340744" exitCode=0 Jan 23 07:52:20 crc kubenswrapper[5102]: I0123 07:52:20.777604 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerDied","Data":"54371bb0d48d82a11e5ed9023a39647d95f9c5635a79902872ef7c43f7340744"} Jan 23 07:52:22 crc kubenswrapper[5102]: I0123 07:52:22.798118 5102 generic.go:334] "Generic (PLEG): container finished" podID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerID="f615515b3afa341b9b92d78d44329b81c477e429109906aaac97fff9615d4540" exitCode=0 Jan 23 07:52:22 crc kubenswrapper[5102]: I0123 07:52:22.798598 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerDied","Data":"f615515b3afa341b9b92d78d44329b81c477e429109906aaac97fff9615d4540"} Jan 23 07:52:23 crc kubenswrapper[5102]: I0123 07:52:23.810661 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerStarted","Data":"0e7b1aeb2b143eb860fe4b26c437c659e02e365ccebbb6224b5a7f8586f540bc"} Jan 23 07:52:23 crc kubenswrapper[5102]: I0123 07:52:23.834140 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b9d2x" podStartSLOduration=3.373262706 podStartE2EDuration="5.834114097s" podCreationTimestamp="2026-01-23 07:52:18 +0000 UTC" firstStartedPulling="2026-01-23 07:52:20.780051435 +0000 UTC m=+3491.600400450" lastFinishedPulling="2026-01-23 07:52:23.240902856 +0000 UTC m=+3494.061251841" observedRunningTime="2026-01-23 07:52:23.828315888 +0000 UTC m=+3494.648664883" watchObservedRunningTime="2026-01-23 07:52:23.834114097 +0000 UTC m=+3494.654463112" Jan 23 07:52:29 crc kubenswrapper[5102]: I0123 07:52:29.214613 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:29 crc kubenswrapper[5102]: I0123 07:52:29.215034 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:29 crc kubenswrapper[5102]: I0123 07:52:29.312270 5102 kubelet.go:2542] "SyncLoop 
(probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:29 crc kubenswrapper[5102]: I0123 07:52:29.905704 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:29 crc kubenswrapper[5102]: I0123 07:52:29.957120 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:31 crc kubenswrapper[5102]: I0123 07:52:31.876515 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b9d2x" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="registry-server" containerID="cri-o://0e7b1aeb2b143eb860fe4b26c437c659e02e365ccebbb6224b5a7f8586f540bc" gracePeriod=2 Jan 23 07:52:32 crc kubenswrapper[5102]: I0123 07:52:32.892179 5102 generic.go:334] "Generic (PLEG): container finished" podID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerID="0e7b1aeb2b143eb860fe4b26c437c659e02e365ccebbb6224b5a7f8586f540bc" exitCode=0 Jan 23 07:52:32 crc kubenswrapper[5102]: I0123 07:52:32.892662 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerDied","Data":"0e7b1aeb2b143eb860fe4b26c437c659e02e365ccebbb6224b5a7f8586f540bc"} Jan 23 07:52:32 crc kubenswrapper[5102]: I0123 07:52:32.971784 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.098975 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content\") pod \"947171b8-a440-416a-940f-8d0a64d6e0f7\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.099082 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities\") pod \"947171b8-a440-416a-940f-8d0a64d6e0f7\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.099120 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cbpbd\" (UniqueName: \"kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd\") pod \"947171b8-a440-416a-940f-8d0a64d6e0f7\" (UID: \"947171b8-a440-416a-940f-8d0a64d6e0f7\") " Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.101015 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities" (OuterVolumeSpecName: "utilities") pod "947171b8-a440-416a-940f-8d0a64d6e0f7" (UID: "947171b8-a440-416a-940f-8d0a64d6e0f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.107969 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd" (OuterVolumeSpecName: "kube-api-access-cbpbd") pod "947171b8-a440-416a-940f-8d0a64d6e0f7" (UID: "947171b8-a440-416a-940f-8d0a64d6e0f7"). InnerVolumeSpecName "kube-api-access-cbpbd". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.154628 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "947171b8-a440-416a-940f-8d0a64d6e0f7" (UID: "947171b8-a440-416a-940f-8d0a64d6e0f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.201303 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.201351 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/947171b8-a440-416a-940f-8d0a64d6e0f7-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.201368 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cbpbd\" (UniqueName: \"kubernetes.io/projected/947171b8-a440-416a-940f-8d0a64d6e0f7-kube-api-access-cbpbd\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.905119 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b9d2x" event={"ID":"947171b8-a440-416a-940f-8d0a64d6e0f7","Type":"ContainerDied","Data":"d903ea1651c123dfe3d9ada8c780a795176b98a0aab4988f47df2212868dda2a"} Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.905213 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b9d2x" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.905229 5102 scope.go:117] "RemoveContainer" containerID="0e7b1aeb2b143eb860fe4b26c437c659e02e365ccebbb6224b5a7f8586f540bc" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.948177 5102 scope.go:117] "RemoveContainer" containerID="f615515b3afa341b9b92d78d44329b81c477e429109906aaac97fff9615d4540" Jan 23 07:52:33 crc kubenswrapper[5102]: I0123 07:52:33.948390 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:34 crc kubenswrapper[5102]: I0123 07:52:34.039968 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b9d2x"] Jan 23 07:52:34 crc kubenswrapper[5102]: I0123 07:52:34.056074 5102 scope.go:117] "RemoveContainer" containerID="54371bb0d48d82a11e5ed9023a39647d95f9c5635a79902872ef7c43f7340744" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.234753 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:35 crc kubenswrapper[5102]: E0123 07:52:35.235320 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="registry-server" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.235332 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="registry-server" Jan 23 07:52:35 crc kubenswrapper[5102]: E0123 07:52:35.235346 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="extract-utilities" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.235352 5102 
state_mem.go:107] "Deleted CPUSet assignment" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="extract-utilities" Jan 23 07:52:35 crc kubenswrapper[5102]: E0123 07:52:35.235380 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="extract-content" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.235387 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="extract-content" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.235521 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" containerName="registry-server" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.236482 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.247690 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.359734 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zwrqq\" (UniqueName: \"kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.359807 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.359946 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.461682 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zwrqq\" (UniqueName: \"kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.461748 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.461810 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc 
kubenswrapper[5102]: I0123 07:52:35.462268 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.462321 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.483601 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zwrqq\" (UniqueName: \"kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq\") pod \"certified-operators-kgpvd\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.553611 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.609907 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="947171b8-a440-416a-940f-8d0a64d6e0f7" path="/var/lib/kubelet/pods/947171b8-a440-416a-940f-8d0a64d6e0f7/volumes" Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.825818 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:35 crc kubenswrapper[5102]: W0123 07:52:35.839854 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod28034f00_b5f1_450d_9654_4baecc814daa.slice/crio-a2cc9bbbdd2cb618147416f30e8dab02a22031c58d155491ba81c978fdaa6550 WatchSource:0}: Error finding container a2cc9bbbdd2cb618147416f30e8dab02a22031c58d155491ba81c978fdaa6550: Status 404 returned error can't find the container with id a2cc9bbbdd2cb618147416f30e8dab02a22031c58d155491ba81c978fdaa6550 Jan 23 07:52:35 crc kubenswrapper[5102]: I0123 07:52:35.928911 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerStarted","Data":"a2cc9bbbdd2cb618147416f30e8dab02a22031c58d155491ba81c978fdaa6550"} Jan 23 07:52:36 crc kubenswrapper[5102]: I0123 07:52:36.941678 5102 generic.go:334] "Generic (PLEG): container finished" podID="28034f00-b5f1-450d-9654-4baecc814daa" containerID="badd3b02cd7a5e5d630f128ce9ba76c6d7d8a287e50a3b882cc0b27c24728bcc" exitCode=0 Jan 23 07:52:36 crc kubenswrapper[5102]: I0123 07:52:36.941791 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerDied","Data":"badd3b02cd7a5e5d630f128ce9ba76c6d7d8a287e50a3b882cc0b27c24728bcc"} Jan 23 07:52:39 crc kubenswrapper[5102]: I0123 07:52:39.966341 5102 generic.go:334] "Generic (PLEG): container finished" podID="28034f00-b5f1-450d-9654-4baecc814daa" containerID="370464fed09b919ac2176adb2d6cf306bda87ee0cab288ebdbafce2c9e4ce7fa" exitCode=0 Jan 23 07:52:39 crc kubenswrapper[5102]: I0123 07:52:39.966437 5102 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerDied","Data":"370464fed09b919ac2176adb2d6cf306bda87ee0cab288ebdbafce2c9e4ce7fa"} Jan 23 07:52:40 crc kubenswrapper[5102]: I0123 07:52:40.977238 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerStarted","Data":"8d6ee4179dabcb48fee28dd471f335ee500e8667f34a776c6fcc8eb7f43a1a5b"} Jan 23 07:52:41 crc kubenswrapper[5102]: I0123 07:52:41.013996 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-kgpvd" podStartSLOduration=2.615248918 podStartE2EDuration="6.013967975s" podCreationTimestamp="2026-01-23 07:52:35 +0000 UTC" firstStartedPulling="2026-01-23 07:52:36.944936672 +0000 UTC m=+3507.765285697" lastFinishedPulling="2026-01-23 07:52:40.343655749 +0000 UTC m=+3511.164004754" observedRunningTime="2026-01-23 07:52:40.998504519 +0000 UTC m=+3511.818853544" watchObservedRunningTime="2026-01-23 07:52:41.013967975 +0000 UTC m=+3511.834316940" Jan 23 07:52:46 crc kubenswrapper[5102]: I0123 07:52:46.036760 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:46 crc kubenswrapper[5102]: I0123 07:52:46.075169 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:46 crc kubenswrapper[5102]: I0123 07:52:46.119066 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:47 crc kubenswrapper[5102]: I0123 07:52:47.081912 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:47 crc kubenswrapper[5102]: I0123 07:52:47.136953 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:49 crc kubenswrapper[5102]: I0123 07:52:49.045607 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-kgpvd" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="registry-server" containerID="cri-o://8d6ee4179dabcb48fee28dd471f335ee500e8667f34a776c6fcc8eb7f43a1a5b" gracePeriod=2 Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.062452 5102 generic.go:334] "Generic (PLEG): container finished" podID="28034f00-b5f1-450d-9654-4baecc814daa" containerID="8d6ee4179dabcb48fee28dd471f335ee500e8667f34a776c6fcc8eb7f43a1a5b" exitCode=0 Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.062533 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerDied","Data":"8d6ee4179dabcb48fee28dd471f335ee500e8667f34a776c6fcc8eb7f43a1a5b"} Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.602280 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.684125 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zwrqq\" (UniqueName: \"kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq\") pod \"28034f00-b5f1-450d-9654-4baecc814daa\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.684477 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities\") pod \"28034f00-b5f1-450d-9654-4baecc814daa\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.684722 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content\") pod \"28034f00-b5f1-450d-9654-4baecc814daa\" (UID: \"28034f00-b5f1-450d-9654-4baecc814daa\") " Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.685303 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities" (OuterVolumeSpecName: "utilities") pod "28034f00-b5f1-450d-9654-4baecc814daa" (UID: "28034f00-b5f1-450d-9654-4baecc814daa"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.688433 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.689483 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq" (OuterVolumeSpecName: "kube-api-access-zwrqq") pod "28034f00-b5f1-450d-9654-4baecc814daa" (UID: "28034f00-b5f1-450d-9654-4baecc814daa"). InnerVolumeSpecName "kube-api-access-zwrqq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.771200 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "28034f00-b5f1-450d-9654-4baecc814daa" (UID: "28034f00-b5f1-450d-9654-4baecc814daa"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.789916 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zwrqq\" (UniqueName: \"kubernetes.io/projected/28034f00-b5f1-450d-9654-4baecc814daa-kube-api-access-zwrqq\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:50 crc kubenswrapper[5102]: I0123 07:52:50.789972 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/28034f00-b5f1-450d-9654-4baecc814daa-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.076581 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-kgpvd" event={"ID":"28034f00-b5f1-450d-9654-4baecc814daa","Type":"ContainerDied","Data":"a2cc9bbbdd2cb618147416f30e8dab02a22031c58d155491ba81c978fdaa6550"} Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.076670 5102 scope.go:117] "RemoveContainer" containerID="8d6ee4179dabcb48fee28dd471f335ee500e8667f34a776c6fcc8eb7f43a1a5b" Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.076688 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-kgpvd" Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.110113 5102 scope.go:117] "RemoveContainer" containerID="370464fed09b919ac2176adb2d6cf306bda87ee0cab288ebdbafce2c9e4ce7fa" Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.137652 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.155021 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-kgpvd"] Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.157396 5102 scope.go:117] "RemoveContainer" containerID="badd3b02cd7a5e5d630f128ce9ba76c6d7d8a287e50a3b882cc0b27c24728bcc" Jan 23 07:52:51 crc kubenswrapper[5102]: I0123 07:52:51.615371 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="28034f00-b5f1-450d-9654-4baecc814daa" path="/var/lib/kubelet/pods/28034f00-b5f1-450d-9654-4baecc814daa/volumes" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.698164 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:21 crc kubenswrapper[5102]: E0123 07:53:21.699130 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="extract-utilities" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.699152 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="extract-utilities" Jan 23 07:53:21 crc kubenswrapper[5102]: E0123 07:53:21.699178 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="extract-content" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.699191 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="extract-content" Jan 23 07:53:21 crc kubenswrapper[5102]: E0123 07:53:21.699206 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="registry-server" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.699217 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="registry-server" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.699465 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="28034f00-b5f1-450d-9654-4baecc814daa" containerName="registry-server" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.700762 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.722555 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.835973 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.836589 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d7vkf\" (UniqueName: \"kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.836843 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.938684 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.938754 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.938842 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d7vkf\" (UniqueName: \"kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.939825 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.940115 5102 operation_generator.go:637] "MountVolume.SetUp succeeded 
for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:21 crc kubenswrapper[5102]: I0123 07:53:21.959420 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d7vkf\" (UniqueName: \"kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf\") pod \"redhat-operators-nst28\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:22 crc kubenswrapper[5102]: I0123 07:53:22.056809 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:22 crc kubenswrapper[5102]: I0123 07:53:22.308620 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:22 crc kubenswrapper[5102]: W0123 07:53:22.323665 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod78b2b960_1ae2_4206_94af_1251af2b8615.slice/crio-9ef74860484ac8dedd1874bfb4b2edc9accbbb6c37559c931a519348f274c3bf WatchSource:0}: Error finding container 9ef74860484ac8dedd1874bfb4b2edc9accbbb6c37559c931a519348f274c3bf: Status 404 returned error can't find the container with id 9ef74860484ac8dedd1874bfb4b2edc9accbbb6c37559c931a519348f274c3bf Jan 23 07:53:22 crc kubenswrapper[5102]: I0123 07:53:22.375594 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerStarted","Data":"9ef74860484ac8dedd1874bfb4b2edc9accbbb6c37559c931a519348f274c3bf"} Jan 23 07:53:23 crc kubenswrapper[5102]: I0123 07:53:23.384003 5102 generic.go:334] "Generic (PLEG): container finished" podID="78b2b960-1ae2-4206-94af-1251af2b8615" containerID="0ca0b035eb6b2345220f2db6a45d3c25bf1ccc20914553c6780cc4b9ed800eac" exitCode=0 Jan 23 07:53:23 crc kubenswrapper[5102]: I0123 07:53:23.384304 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerDied","Data":"0ca0b035eb6b2345220f2db6a45d3c25bf1ccc20914553c6780cc4b9ed800eac"} Jan 23 07:53:24 crc kubenswrapper[5102]: I0123 07:53:24.393819 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerStarted","Data":"a63a187d41d6c0e6e2da3ce253456f5cbd90f745b403ff12712896cbb04486e5"} Jan 23 07:53:25 crc kubenswrapper[5102]: I0123 07:53:25.406056 5102 generic.go:334] "Generic (PLEG): container finished" podID="78b2b960-1ae2-4206-94af-1251af2b8615" containerID="a63a187d41d6c0e6e2da3ce253456f5cbd90f745b403ff12712896cbb04486e5" exitCode=0 Jan 23 07:53:25 crc kubenswrapper[5102]: I0123 07:53:25.406097 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerDied","Data":"a63a187d41d6c0e6e2da3ce253456f5cbd90f745b403ff12712896cbb04486e5"} Jan 23 07:53:26 crc kubenswrapper[5102]: I0123 07:53:26.440086 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" 
event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerStarted","Data":"d22bbc6d5f9b864e932ef9e718212bbbd71833fca368df6e71654e2acf564e59"} Jan 23 07:53:26 crc kubenswrapper[5102]: I0123 07:53:26.478461 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nst28" podStartSLOduration=2.992319909 podStartE2EDuration="5.478429238s" podCreationTimestamp="2026-01-23 07:53:21 +0000 UTC" firstStartedPulling="2026-01-23 07:53:23.388580335 +0000 UTC m=+3554.208929300" lastFinishedPulling="2026-01-23 07:53:25.874689644 +0000 UTC m=+3556.695038629" observedRunningTime="2026-01-23 07:53:26.46841357 +0000 UTC m=+3557.288762595" watchObservedRunningTime="2026-01-23 07:53:26.478429238 +0000 UTC m=+3557.298778253" Jan 23 07:53:32 crc kubenswrapper[5102]: I0123 07:53:32.057698 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:32 crc kubenswrapper[5102]: I0123 07:53:32.058321 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:33 crc kubenswrapper[5102]: I0123 07:53:33.121730 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nst28" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="registry-server" probeResult="failure" output=< Jan 23 07:53:33 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 07:53:33 crc kubenswrapper[5102]: > Jan 23 07:53:42 crc kubenswrapper[5102]: I0123 07:53:42.134805 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:42 crc kubenswrapper[5102]: I0123 07:53:42.217503 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:42 crc kubenswrapper[5102]: I0123 07:53:42.387474 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:43 crc kubenswrapper[5102]: I0123 07:53:43.576479 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nst28" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="registry-server" containerID="cri-o://d22bbc6d5f9b864e932ef9e718212bbbd71833fca368df6e71654e2acf564e59" gracePeriod=2 Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.607052 5102 generic.go:334] "Generic (PLEG): container finished" podID="78b2b960-1ae2-4206-94af-1251af2b8615" containerID="d22bbc6d5f9b864e932ef9e718212bbbd71833fca368df6e71654e2acf564e59" exitCode=0 Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.607374 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerDied","Data":"d22bbc6d5f9b864e932ef9e718212bbbd71833fca368df6e71654e2acf564e59"} Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.803493 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.982287 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d7vkf\" (UniqueName: \"kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf\") pod \"78b2b960-1ae2-4206-94af-1251af2b8615\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.982332 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities\") pod \"78b2b960-1ae2-4206-94af-1251af2b8615\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.982375 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content\") pod \"78b2b960-1ae2-4206-94af-1251af2b8615\" (UID: \"78b2b960-1ae2-4206-94af-1251af2b8615\") " Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.984214 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities" (OuterVolumeSpecName: "utilities") pod "78b2b960-1ae2-4206-94af-1251af2b8615" (UID: "78b2b960-1ae2-4206-94af-1251af2b8615"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:53:45 crc kubenswrapper[5102]: I0123 07:53:45.987880 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf" (OuterVolumeSpecName: "kube-api-access-d7vkf") pod "78b2b960-1ae2-4206-94af-1251af2b8615" (UID: "78b2b960-1ae2-4206-94af-1251af2b8615"). InnerVolumeSpecName "kube-api-access-d7vkf". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.084298 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d7vkf\" (UniqueName: \"kubernetes.io/projected/78b2b960-1ae2-4206-94af-1251af2b8615-kube-api-access-d7vkf\") on node \"crc\" DevicePath \"\"" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.084348 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.178954 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "78b2b960-1ae2-4206-94af-1251af2b8615" (UID: "78b2b960-1ae2-4206-94af-1251af2b8615"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.185660 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/78b2b960-1ae2-4206-94af-1251af2b8615-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.626071 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nst28" event={"ID":"78b2b960-1ae2-4206-94af-1251af2b8615","Type":"ContainerDied","Data":"9ef74860484ac8dedd1874bfb4b2edc9accbbb6c37559c931a519348f274c3bf"} Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.626111 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nst28" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.626198 5102 scope.go:117] "RemoveContainer" containerID="d22bbc6d5f9b864e932ef9e718212bbbd71833fca368df6e71654e2acf564e59" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.669511 5102 scope.go:117] "RemoveContainer" containerID="a63a187d41d6c0e6e2da3ce253456f5cbd90f745b403ff12712896cbb04486e5" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.679718 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.689008 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nst28"] Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.693686 5102 scope.go:117] "RemoveContainer" containerID="0ca0b035eb6b2345220f2db6a45d3c25bf1ccc20914553c6780cc4b9ed800eac" Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.768952 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:53:46 crc kubenswrapper[5102]: I0123 07:53:46.769036 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:53:47 crc kubenswrapper[5102]: I0123 07:53:47.613191 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" path="/var/lib/kubelet/pods/78b2b960-1ae2-4206-94af-1251af2b8615/volumes" Jan 23 07:54:16 crc kubenswrapper[5102]: I0123 07:54:16.802039 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 07:54:16 crc kubenswrapper[5102]: I0123 07:54:16.802850 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 07:54:46 crc kubenswrapper[5102]: I0123 07:54:46.768824 5102 patch_prober.go:28] 
interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:54:46 crc kubenswrapper[5102]: I0123 07:54:46.769376 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:54:46 crc kubenswrapper[5102]: I0123 07:54:46.769440 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh"
Jan 23 07:54:46 crc kubenswrapper[5102]: I0123 07:54:46.770201 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 07:54:46 crc kubenswrapper[5102]: I0123 07:54:46.770281 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9" gracePeriod=600
Jan 23 07:54:47 crc kubenswrapper[5102]: I0123 07:54:47.168024 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9" exitCode=0
Jan 23 07:54:47 crc kubenswrapper[5102]: I0123 07:54:47.168096 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9"}
Jan 23 07:54:47 crc kubenswrapper[5102]: I0123 07:54:47.168404 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"}
Jan 23 07:54:47 crc kubenswrapper[5102]: I0123 07:54:47.168433 5102 scope.go:117] "RemoveContainer" containerID="5f06b005b57c25a49a60b4699eaf84db8ea8d76b0beac7453c6b5d848535141f"
Jan 23 07:57:16 crc kubenswrapper[5102]: I0123 07:57:16.768640 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:57:16 crc kubenswrapper[5102]: I0123 07:57:16.769327 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:57:46 crc kubenswrapper[5102]: I0123 07:57:46.767993 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:57:46 crc kubenswrapper[5102]: I0123 07:57:46.768863 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:58:16 crc kubenswrapper[5102]: I0123 07:58:16.769181 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 07:58:16 crc kubenswrapper[5102]: I0123 07:58:16.770017 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 07:58:16 crc kubenswrapper[5102]: I0123 07:58:16.770104 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh"
Jan 23 07:58:16 crc kubenswrapper[5102]: I0123 07:58:16.771255 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 07:58:16 crc kubenswrapper[5102]: I0123 07:58:16.771367 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1" gracePeriod=600
Jan 23 07:58:16 crc kubenswrapper[5102]: E0123 07:58:16.901872 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:58:17 crc kubenswrapper[5102]: I0123 07:58:17.191528 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1" exitCode=0
Jan 23 07:58:17 crc kubenswrapper[5102]: I0123 07:58:17.191608 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"}
Jan 23 07:58:17 crc kubenswrapper[5102]: I0123 07:58:17.191690 5102 scope.go:117] "RemoveContainer" containerID="6fc6a962fac80ad1c96a176a0df58afe0d320b534e453bb191a6ffab6d9850a9"
Jan 23 07:58:17 crc kubenswrapper[5102]: I0123 07:58:17.192198 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:58:17 crc kubenswrapper[5102]: E0123 07:58:17.192494 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:58:29 crc kubenswrapper[5102]: I0123 07:58:29.607026 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:58:29 crc kubenswrapper[5102]: E0123 07:58:29.607978 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:58:44 crc kubenswrapper[5102]: I0123 07:58:44.599373 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:58:44 crc kubenswrapper[5102]: E0123 07:58:44.600919 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:58:58 crc kubenswrapper[5102]: I0123 07:58:58.598461 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:58:58 crc kubenswrapper[5102]: E0123 07:58:58.599255 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:59:12 crc kubenswrapper[5102]: I0123 07:59:12.599005 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:59:12 crc kubenswrapper[5102]: E0123 07:59:12.600140 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:59:24 crc kubenswrapper[5102]: I0123 07:59:24.598304 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:59:24 crc kubenswrapper[5102]: E0123 07:59:24.599177 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:59:37 crc kubenswrapper[5102]: I0123 07:59:37.599930 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:59:37 crc kubenswrapper[5102]: E0123 07:59:37.601080 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 07:59:48 crc kubenswrapper[5102]: I0123 07:59:48.598935 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 07:59:48 crc kubenswrapper[5102]: E0123 07:59:48.600133 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.177026 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"]
Jan 23 08:00:00 crc kubenswrapper[5102]: E0123 08:00:00.178135 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="registry-server"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.178156 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="registry-server"
Jan 23 08:00:00 crc kubenswrapper[5102]: E0123 08:00:00.178172 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="extract-content"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.178179 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="extract-content"
Jan 23 08:00:00 crc kubenswrapper[5102]: E0123 08:00:00.178194 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="extract-utilities"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.178201 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="extract-utilities"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.178511 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="78b2b960-1ae2-4206-94af-1251af2b8615" containerName="registry-server"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.179117 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.186066 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.188112 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.195439 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"]
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.265255 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vhvf6\" (UniqueName: \"kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.265375 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.265417 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.366614 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.366679 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.366744 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vhvf6\" (UniqueName: \"kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.368386 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.373238 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.395486 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vhvf6\" (UniqueName: \"kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6\") pod \"collect-profiles-29485920-h8pqh\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:00 crc kubenswrapper[5102]: I0123 08:00:00.514035 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:01 crc kubenswrapper[5102]: I0123 08:00:01.069830 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"]
Jan 23 08:00:01 crc kubenswrapper[5102]: I0123 08:00:01.145168 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh" event={"ID":"b3c6feb8-053e-492c-ad70-7c44e58ea9a2","Type":"ContainerStarted","Data":"6c7cdb3bb2cac91bcfacef857c42c869c75015f2116b6cb5ef7f6c38fe80847d"}
Jan 23 08:00:02 crc kubenswrapper[5102]: I0123 08:00:02.152612 5102 generic.go:334] "Generic (PLEG): container finished" podID="b3c6feb8-053e-492c-ad70-7c44e58ea9a2" containerID="fb7b9b321b94ac5fcbc87c4f68ec6a189414a6af89a76fe981a0a3fef6eb1f70" exitCode=0
Jan 23 08:00:02 crc kubenswrapper[5102]: I0123 08:00:02.152690 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh" event={"ID":"b3c6feb8-053e-492c-ad70-7c44e58ea9a2","Type":"ContainerDied","Data":"fb7b9b321b94ac5fcbc87c4f68ec6a189414a6af89a76fe981a0a3fef6eb1f70"}
Jan 23 08:00:02 crc kubenswrapper[5102]: I0123 08:00:02.598202 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:00:02 crc kubenswrapper[5102]: E0123 08:00:02.598985 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.648759 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.716954 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vhvf6\" (UniqueName: \"kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6\") pod \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") "
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.717073 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume\") pod \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") "
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.717268 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume\") pod \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\" (UID: \"b3c6feb8-053e-492c-ad70-7c44e58ea9a2\") "
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.718792 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume" (OuterVolumeSpecName: "config-volume") pod "b3c6feb8-053e-492c-ad70-7c44e58ea9a2" (UID: "b3c6feb8-053e-492c-ad70-7c44e58ea9a2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue ""
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.722483 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b3c6feb8-053e-492c-ad70-7c44e58ea9a2" (UID: "b3c6feb8-053e-492c-ad70-7c44e58ea9a2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue ""
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.723364 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6" (OuterVolumeSpecName: "kube-api-access-vhvf6") pod "b3c6feb8-053e-492c-ad70-7c44e58ea9a2" (UID: "b3c6feb8-053e-492c-ad70-7c44e58ea9a2"). InnerVolumeSpecName "kube-api-access-vhvf6". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.819267 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-config-volume\") on node \"crc\" DevicePath \"\""
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.819343 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-secret-volume\") on node \"crc\" DevicePath \"\""
Jan 23 08:00:03 crc kubenswrapper[5102]: I0123 08:00:03.819373 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vhvf6\" (UniqueName: \"kubernetes.io/projected/b3c6feb8-053e-492c-ad70-7c44e58ea9a2-kube-api-access-vhvf6\") on node \"crc\" DevicePath \"\""
Jan 23 08:00:04 crc kubenswrapper[5102]: I0123 08:00:04.178704 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh" event={"ID":"b3c6feb8-053e-492c-ad70-7c44e58ea9a2","Type":"ContainerDied","Data":"6c7cdb3bb2cac91bcfacef857c42c869c75015f2116b6cb5ef7f6c38fe80847d"}
Jan 23 08:00:04 crc kubenswrapper[5102]: I0123 08:00:04.178745 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="6c7cdb3bb2cac91bcfacef857c42c869c75015f2116b6cb5ef7f6c38fe80847d"
Jan 23 08:00:04 crc kubenswrapper[5102]: I0123 08:00:04.178778 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"
Jan 23 08:00:04 crc kubenswrapper[5102]: I0123 08:00:04.737841 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"]
Jan 23 08:00:04 crc kubenswrapper[5102]: I0123 08:00:04.744563 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485875-zs5hd"]
Jan 23 08:00:05 crc kubenswrapper[5102]: I0123 08:00:05.615023 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d3333009-db65-4416-8fee-26e53bd734cc" path="/var/lib/kubelet/pods/d3333009-db65-4416-8fee-26e53bd734cc/volumes"
Jan 23 08:00:16 crc kubenswrapper[5102]: I0123 08:00:16.238983 5102 scope.go:117] "RemoveContainer" containerID="9ec61b7cca495dfcdd0f7462fc5b68e697a6891ec49580d4bec120d4330cecc7"
Jan 23 08:00:17 crc kubenswrapper[5102]: I0123 08:00:17.598275 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:00:17 crc kubenswrapper[5102]: E0123 08:00:17.599017 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:00:28 crc kubenswrapper[5102]: I0123 08:00:28.598406 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:00:28 crc kubenswrapper[5102]: E0123 08:00:28.598906 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:00:43 crc kubenswrapper[5102]: I0123 08:00:43.598996 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:00:43 crc kubenswrapper[5102]: E0123 08:00:43.599575 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:00:58 crc kubenswrapper[5102]: I0123 08:00:58.598038 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:00:58 crc kubenswrapper[5102]: E0123 08:00:58.598818 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:01:09 crc kubenswrapper[5102]: I0123 08:01:09.606245 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:01:09 crc kubenswrapper[5102]: E0123 08:01:09.606993 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:01:23 crc kubenswrapper[5102]: I0123 08:01:23.599270 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:01:23 crc kubenswrapper[5102]: E0123 08:01:23.600396 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:01:38 crc kubenswrapper[5102]: I0123 08:01:38.599097 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:01:38 crc kubenswrapper[5102]: E0123 08:01:38.600223 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:01:53 crc kubenswrapper[5102]: I0123 08:01:53.597920 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:01:53 crc kubenswrapper[5102]: E0123 08:01:53.598714 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:04 crc kubenswrapper[5102]: I0123 08:02:04.598483 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:02:04 crc kubenswrapper[5102]: E0123 08:02:04.599516 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:15 crc kubenswrapper[5102]: I0123 08:02:15.598774 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:02:15 crc kubenswrapper[5102]: E0123 08:02:15.599499 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:27 crc kubenswrapper[5102]: I0123 08:02:27.598341 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:02:27 crc kubenswrapper[5102]: E0123 08:02:27.599267 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:40 crc kubenswrapper[5102]: I0123 08:02:40.598579 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:02:40 crc kubenswrapper[5102]: E0123 08:02:40.599655 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.236757 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:02:51 crc kubenswrapper[5102]: E0123 08:02:51.237531 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b3c6feb8-053e-492c-ad70-7c44e58ea9a2" containerName="collect-profiles"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.237562 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b3c6feb8-053e-492c-ad70-7c44e58ea9a2" containerName="collect-profiles"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.237703 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b3c6feb8-053e-492c-ad70-7c44e58ea9a2" containerName="collect-profiles"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.238896 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.257872 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.406225 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfkjm\" (UniqueName: \"kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.406290 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.406391 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.507334 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.507426 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfkjm\" (UniqueName: \"kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.507467 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.507809 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.507872 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.528797 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfkjm\" (UniqueName: \"kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm\") pod \"community-operators-tbxf7\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") " pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.557017 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:02:51 crc kubenswrapper[5102]: I0123 08:02:51.599018 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:02:51 crc kubenswrapper[5102]: E0123 08:02:51.599249 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:02:52 crc kubenswrapper[5102]: I0123 08:02:52.078921 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:02:53 crc kubenswrapper[5102]: I0123 08:02:53.086685 5102 generic.go:334] "Generic (PLEG): container finished" podID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerID="3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11" exitCode=0
Jan 23 08:02:53 crc kubenswrapper[5102]: I0123 08:02:53.086788 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerDied","Data":"3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11"}
Jan 23 08:02:53 crc kubenswrapper[5102]: I0123 08:02:53.087048 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerStarted","Data":"2dd503c5780022a6e008fdad9421f605ba842ad70f23858c403d9cede3d99d20"}
Jan 23 08:02:53 crc kubenswrapper[5102]: I0123 08:02:53.089702 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider
Jan 23 08:02:54 crc kubenswrapper[5102]: I0123 08:02:54.095789 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerStarted","Data":"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"}
Jan 23 08:02:55 crc kubenswrapper[5102]: I0123 08:02:55.126890 5102 generic.go:334] "Generic (PLEG): container finished" podID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerID="2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752" exitCode=0
Jan 23 08:02:55 crc kubenswrapper[5102]: I0123 08:02:55.127004 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerDied","Data":"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"}
Jan 23 08:02:56 crc kubenswrapper[5102]: I0123 08:02:56.135358 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerStarted","Data":"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"}
Jan 23 08:02:56 crc kubenswrapper[5102]: I0123 08:02:56.156159 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tbxf7" podStartSLOduration=2.61601742 podStartE2EDuration="5.156138014s" podCreationTimestamp="2026-01-23 08:02:51 +0000 UTC" firstStartedPulling="2026-01-23 08:02:53.089358545 +0000 UTC m=+4123.909707530" lastFinishedPulling="2026-01-23 08:02:55.629479149 +0000 UTC m=+4126.449828124" observedRunningTime="2026-01-23 08:02:56.153209034 +0000 UTC m=+4126.973558029" watchObservedRunningTime="2026-01-23 08:02:56.156138014 +0000 UTC m=+4126.976486989"
Jan 23 08:03:01 crc kubenswrapper[5102]: I0123 08:03:01.557498 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:01 crc kubenswrapper[5102]: I0123 08:03:01.557982 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:01 crc kubenswrapper[5102]: I0123 08:03:01.642744 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:02 crc kubenswrapper[5102]: I0123 08:03:02.235213 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:02 crc kubenswrapper[5102]: I0123 08:03:02.299249 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.199289 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tbxf7" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="registry-server" containerID="cri-o://db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb" gracePeriod=2
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.904827 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.935109 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfkjm\" (UniqueName: \"kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm\") pod \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") "
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.935163 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content\") pod \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") "
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.935265 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities\") pod \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\" (UID: \"12c3cb7a-075b-46e8-a9b3-2119a7a220b9\") "
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.936867 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities" (OuterVolumeSpecName: "utilities") pod "12c3cb7a-075b-46e8-a9b3-2119a7a220b9" (UID: "12c3cb7a-075b-46e8-a9b3-2119a7a220b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.944967 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm" (OuterVolumeSpecName: "kube-api-access-kfkjm") pod "12c3cb7a-075b-46e8-a9b3-2119a7a220b9" (UID: "12c3cb7a-075b-46e8-a9b3-2119a7a220b9"). InnerVolumeSpecName "kube-api-access-kfkjm". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 08:03:04 crc kubenswrapper[5102]: I0123 08:03:04.989639 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "12c3cb7a-075b-46e8-a9b3-2119a7a220b9" (UID: "12c3cb7a-075b-46e8-a9b3-2119a7a220b9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.036590 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.036621 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfkjm\" (UniqueName: \"kubernetes.io/projected/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-kube-api-access-kfkjm\") on node \"crc\" DevicePath \"\""
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.036630 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/12c3cb7a-075b-46e8-a9b3-2119a7a220b9-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.210313 5102 generic.go:334] "Generic (PLEG): container finished" podID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerID="db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb" exitCode=0
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.210353 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerDied","Data":"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"}
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.210377 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tbxf7" event={"ID":"12c3cb7a-075b-46e8-a9b3-2119a7a220b9","Type":"ContainerDied","Data":"2dd503c5780022a6e008fdad9421f605ba842ad70f23858c403d9cede3d99d20"}
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.210392 5102 scope.go:117] "RemoveContainer" containerID="db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.210442 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tbxf7"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.246519 5102 scope.go:117] "RemoveContainer" containerID="2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.266889 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.281328 5102 scope.go:117] "RemoveContainer" containerID="3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.282847 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tbxf7"]
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.305757 5102 scope.go:117] "RemoveContainer" containerID="db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"
Jan 23 08:03:05 crc kubenswrapper[5102]: E0123 08:03:05.306315 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb\": container with ID starting with db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb not found: ID does not exist" containerID="db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.306364 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb"} err="failed to get container status \"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb\": rpc error: code = NotFound desc = could not find container \"db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb\": container with ID starting with db5b980adf0a98ac344126fbe5eb45a3fc9a451c56c3b33f2d368e4e2b57b0eb not found: ID does not exist"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.306391 5102 scope.go:117] "RemoveContainer" containerID="2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"
Jan 23 08:03:05 crc kubenswrapper[5102]: E0123 08:03:05.306914 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752\": container with ID starting with 2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752 not found: ID does not exist" containerID="2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.306978 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752"} err="failed to get container status \"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752\": rpc error: code = NotFound desc = could not find container \"2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752\": container with ID starting with 2faec49414c5cf0a5f64930b437beec77cbbf99ab9e530dc7d59ce3ca200b752 not found: ID does not exist"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.307020 5102 scope.go:117] "RemoveContainer" containerID="3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11"
Jan 23 08:03:05 crc kubenswrapper[5102]: E0123 08:03:05.307465 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11\": container with ID starting with 3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11 not found: ID does not exist" containerID="3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.307523 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11"} err="failed to get container status \"3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11\": rpc error: code = NotFound desc = could not find container \"3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11\": container with ID starting with 3b8a13d1bc487d4ba3fc004a251cf4fa69782b29f5f8246e0e0eb3a9932a5d11 not found: ID does not exist"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.598267 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:03:05 crc kubenswrapper[5102]: E0123 08:03:05.598828 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:03:05 crc kubenswrapper[5102]: I0123 08:03:05.612925 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" path="/var/lib/kubelet/pods/12c3cb7a-075b-46e8-a9b3-2119a7a220b9/volumes"
Jan 23 08:03:19 crc kubenswrapper[5102]: I0123 08:03:19.608135 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1"
Jan 23 08:03:20 crc kubenswrapper[5102]: I0123 08:03:20.346266 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993"}
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.663889 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"]
Jan 23 08:03:35 crc kubenswrapper[5102]: E0123 08:03:35.664660 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="extract-utilities"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.664671 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="extract-utilities"
Jan 23 08:03:35 crc kubenswrapper[5102]: E0123 08:03:35.664699 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="registry-server"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.664705 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="registry-server"
Jan 23 08:03:35 crc kubenswrapper[5102]: E0123 08:03:35.664722 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="extract-content"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.664728 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="extract-content"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.664891 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="12c3cb7a-075b-46e8-a9b3-2119a7a220b9" containerName="registry-server"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.665955 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.677630 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"]
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.835025 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.835117 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.835190 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qbt29\" (UniqueName: \"kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.936317 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qbt29\" (UniqueName: \"kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.936409 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.936441 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.936958 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.937219 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.992331 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qbt29\" (UniqueName: \"kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29\") pod \"redhat-marketplace-cgjln\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:35 crc kubenswrapper[5102]: I0123 08:03:35.998800 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:36 crc kubenswrapper[5102]: I0123 08:03:36.234461 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"]
Jan 23 08:03:36 crc kubenswrapper[5102]: I0123 08:03:36.482935 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerStarted","Data":"1bb247e1528225e5d7d701689f32fa91559c2a62033fa574461c2094485d39f5"}
Jan 23 08:03:37 crc kubenswrapper[5102]: I0123 08:03:37.497614 5102 generic.go:334] "Generic (PLEG): container finished" podID="526a6267-0021-4287-a936-56aeef729c59" containerID="d84a532804c2208c18e3690861da9b79c497a023064d524f5e58b985af1d2e90" exitCode=0
Jan 23 08:03:37 crc kubenswrapper[5102]: I0123 08:03:37.497671 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerDied","Data":"d84a532804c2208c18e3690861da9b79c497a023064d524f5e58b985af1d2e90"}
Jan 23 08:03:40 crc kubenswrapper[5102]: I0123 08:03:40.524242 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerStarted","Data":"0967917b4b438ffb28aaa529291b57cd71b7709a47d1ac12e64dbd027a0becdc"}
Jan 23 08:03:41 crc kubenswrapper[5102]: I0123 08:03:41.533303 5102 generic.go:334] "Generic (PLEG): container finished" podID="526a6267-0021-4287-a936-56aeef729c59" containerID="0967917b4b438ffb28aaa529291b57cd71b7709a47d1ac12e64dbd027a0becdc" exitCode=0
Jan 23 08:03:41 crc kubenswrapper[5102]: I0123 08:03:41.533356 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerDied","Data":"0967917b4b438ffb28aaa529291b57cd71b7709a47d1ac12e64dbd027a0becdc"}
Jan 23 08:03:42 crc kubenswrapper[5102]: I0123 08:03:42.542715 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerStarted","Data":"b7d5290a8eed6e5478bb2b62868db4c75660513fd5a2495cb28734f7bca59e6c"}
Jan 23 08:03:42 crc kubenswrapper[5102]: I0123 08:03:42.565737 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-cgjln" podStartSLOduration=3.069912039 podStartE2EDuration="7.565713759s" podCreationTimestamp="2026-01-23 08:03:35 +0000 UTC" firstStartedPulling="2026-01-23 08:03:37.499998401 +0000 UTC m=+4168.320347396" lastFinishedPulling="2026-01-23 08:03:41.995800101 +0000 UTC m=+4172.816149116" observedRunningTime="2026-01-23 08:03:42.559983803 +0000 UTC m=+4173.380332818" watchObservedRunningTime="2026-01-23 08:03:42.565713759 +0000 UTC m=+4173.386062744"
Jan 23 08:03:45 crc kubenswrapper[5102]: I0123 08:03:45.999062 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:45 crc kubenswrapper[5102]: I0123 08:03:45.999122 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:46 crc kubenswrapper[5102]: I0123 08:03:46.047989 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:56 crc kubenswrapper[5102]: I0123 08:03:56.080010 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-cgjln"
Jan 23 08:03:56 crc kubenswrapper[5102]: I0123 08:03:56.123144 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"]
Jan 23 08:03:56 crc kubenswrapper[5102]: I0123 08:03:56.652134 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-cgjln" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="registry-server" containerID="cri-o://b7d5290a8eed6e5478bb2b62868db4c75660513fd5a2495cb28734f7bca59e6c" gracePeriod=2
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.664839 5102 generic.go:334] "Generic (PLEG): container finished" podID="526a6267-0021-4287-a936-56aeef729c59" containerID="b7d5290a8eed6e5478bb2b62868db4c75660513fd5a2495cb28734f7bca59e6c" exitCode=0
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.664967 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerDied","Data":"b7d5290a8eed6e5478bb2b62868db4c75660513fd5a2495cb28734f7bca59e6c"}
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.739056 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-2srpq"]
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.742319 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.756849 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2srpq"]
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.826342 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.826495 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.826591 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlq92\" (UniqueName: \"kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.928416 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlq92\" (UniqueName: \"kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.928500 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.928614 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.929082 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.929465 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq"
Jan 23 08:03:57 crc kubenswrapper[5102]: I0123 08:03:57.960372 5102 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-vlq92\" (UniqueName: \"kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92\") pod \"certified-operators-2srpq\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.067227 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.183919 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cgjln" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.232153 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities\") pod \"526a6267-0021-4287-a936-56aeef729c59\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.232200 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qbt29\" (UniqueName: \"kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29\") pod \"526a6267-0021-4287-a936-56aeef729c59\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.232237 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content\") pod \"526a6267-0021-4287-a936-56aeef729c59\" (UID: \"526a6267-0021-4287-a936-56aeef729c59\") " Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.234339 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities" (OuterVolumeSpecName: "utilities") pod "526a6267-0021-4287-a936-56aeef729c59" (UID: "526a6267-0021-4287-a936-56aeef729c59"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.248798 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29" (OuterVolumeSpecName: "kube-api-access-qbt29") pod "526a6267-0021-4287-a936-56aeef729c59" (UID: "526a6267-0021-4287-a936-56aeef729c59"). InnerVolumeSpecName "kube-api-access-qbt29". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.284000 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "526a6267-0021-4287-a936-56aeef729c59" (UID: "526a6267-0021-4287-a936-56aeef729c59"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.334242 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.334279 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qbt29\" (UniqueName: \"kubernetes.io/projected/526a6267-0021-4287-a936-56aeef729c59-kube-api-access-qbt29\") on node \"crc\" DevicePath \"\"" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.334290 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/526a6267-0021-4287-a936-56aeef729c59-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.516785 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-2srpq"] Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.674993 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-cgjln" event={"ID":"526a6267-0021-4287-a936-56aeef729c59","Type":"ContainerDied","Data":"1bb247e1528225e5d7d701689f32fa91559c2a62033fa574461c2094485d39f5"} Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.675023 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-cgjln" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.675060 5102 scope.go:117] "RemoveContainer" containerID="b7d5290a8eed6e5478bb2b62868db4c75660513fd5a2495cb28734f7bca59e6c" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.677768 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerStarted","Data":"930e4e95110de3b50047eede3947055fb36006f34695d4fcb0b2c685c7d89a0e"} Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.708163 5102 scope.go:117] "RemoveContainer" containerID="0967917b4b438ffb28aaa529291b57cd71b7709a47d1ac12e64dbd027a0becdc" Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.711527 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"] Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.721033 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-cgjln"] Jan 23 08:03:58 crc kubenswrapper[5102]: I0123 08:03:58.734734 5102 scope.go:117] "RemoveContainer" containerID="d84a532804c2208c18e3690861da9b79c497a023064d524f5e58b985af1d2e90" Jan 23 08:03:59 crc kubenswrapper[5102]: I0123 08:03:59.614254 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="526a6267-0021-4287-a936-56aeef729c59" path="/var/lib/kubelet/pods/526a6267-0021-4287-a936-56aeef729c59/volumes" Jan 23 08:03:59 crc kubenswrapper[5102]: I0123 08:03:59.689344 5102 generic.go:334] "Generic (PLEG): container finished" podID="1c367f44-edea-4453-8219-0ec010ee30bb" containerID="7413a304ff5fb827a1463ec1aec74d6becdbfba6d41788c1acb4dc872ed610a4" exitCode=0 Jan 23 08:03:59 crc kubenswrapper[5102]: I0123 08:03:59.689441 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" 
event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerDied","Data":"7413a304ff5fb827a1463ec1aec74d6becdbfba6d41788c1acb4dc872ed610a4"} Jan 23 08:04:00 crc kubenswrapper[5102]: I0123 08:04:00.699486 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerStarted","Data":"e9469ec0fa8bc56f8e8eb47d23810bc91d3ef823724cb92de5c7427b0641e38f"} Jan 23 08:04:01 crc kubenswrapper[5102]: I0123 08:04:01.715996 5102 generic.go:334] "Generic (PLEG): container finished" podID="1c367f44-edea-4453-8219-0ec010ee30bb" containerID="e9469ec0fa8bc56f8e8eb47d23810bc91d3ef823724cb92de5c7427b0641e38f" exitCode=0 Jan 23 08:04:01 crc kubenswrapper[5102]: I0123 08:04:01.716077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerDied","Data":"e9469ec0fa8bc56f8e8eb47d23810bc91d3ef823724cb92de5c7427b0641e38f"} Jan 23 08:04:02 crc kubenswrapper[5102]: I0123 08:04:02.726163 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerStarted","Data":"3c363699a294c54f11e4eb871fec39d8b2d5e2b472a3acd7ee3ca048e7b97387"} Jan 23 08:04:02 crc kubenswrapper[5102]: I0123 08:04:02.747216 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-2srpq" podStartSLOduration=3.140237732 podStartE2EDuration="5.747195195s" podCreationTimestamp="2026-01-23 08:03:57 +0000 UTC" firstStartedPulling="2026-01-23 08:03:59.694214413 +0000 UTC m=+4190.514563388" lastFinishedPulling="2026-01-23 08:04:02.301171866 +0000 UTC m=+4193.121520851" observedRunningTime="2026-01-23 08:04:02.74572005 +0000 UTC m=+4193.566069035" watchObservedRunningTime="2026-01-23 08:04:02.747195195 +0000 UTC m=+4193.567544170" Jan 23 08:04:08 crc kubenswrapper[5102]: I0123 08:04:08.069317 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:08 crc kubenswrapper[5102]: I0123 08:04:08.070177 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:08 crc kubenswrapper[5102]: I0123 08:04:08.140263 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:08 crc kubenswrapper[5102]: I0123 08:04:08.832323 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:08 crc kubenswrapper[5102]: I0123 08:04:08.885805 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2srpq"] Jan 23 08:04:10 crc kubenswrapper[5102]: I0123 08:04:10.800599 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-2srpq" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="registry-server" containerID="cri-o://3c363699a294c54f11e4eb871fec39d8b2d5e2b472a3acd7ee3ca048e7b97387" gracePeriod=2 Jan 23 08:04:11 crc kubenswrapper[5102]: I0123 08:04:11.807931 5102 generic.go:334] "Generic (PLEG): container finished" podID="1c367f44-edea-4453-8219-0ec010ee30bb" 
containerID="3c363699a294c54f11e4eb871fec39d8b2d5e2b472a3acd7ee3ca048e7b97387" exitCode=0 Jan 23 08:04:11 crc kubenswrapper[5102]: I0123 08:04:11.807976 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerDied","Data":"3c363699a294c54f11e4eb871fec39d8b2d5e2b472a3acd7ee3ca048e7b97387"} Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.701295 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.820227 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-2srpq" event={"ID":"1c367f44-edea-4453-8219-0ec010ee30bb","Type":"ContainerDied","Data":"930e4e95110de3b50047eede3947055fb36006f34695d4fcb0b2c685c7d89a0e"} Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.820587 5102 scope.go:117] "RemoveContainer" containerID="3c363699a294c54f11e4eb871fec39d8b2d5e2b472a3acd7ee3ca048e7b97387" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.820322 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-2srpq" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.844714 5102 scope.go:117] "RemoveContainer" containerID="e9469ec0fa8bc56f8e8eb47d23810bc91d3ef823724cb92de5c7427b0641e38f" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.862799 5102 scope.go:117] "RemoveContainer" containerID="7413a304ff5fb827a1463ec1aec74d6becdbfba6d41788c1acb4dc872ed610a4" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.869622 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content\") pod \"1c367f44-edea-4453-8219-0ec010ee30bb\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.869680 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities\") pod \"1c367f44-edea-4453-8219-0ec010ee30bb\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.869730 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlq92\" (UniqueName: \"kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92\") pod \"1c367f44-edea-4453-8219-0ec010ee30bb\" (UID: \"1c367f44-edea-4453-8219-0ec010ee30bb\") " Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.872478 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities" (OuterVolumeSpecName: "utilities") pod "1c367f44-edea-4453-8219-0ec010ee30bb" (UID: "1c367f44-edea-4453-8219-0ec010ee30bb"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.876946 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92" (OuterVolumeSpecName: "kube-api-access-vlq92") pod "1c367f44-edea-4453-8219-0ec010ee30bb" (UID: "1c367f44-edea-4453-8219-0ec010ee30bb"). 
InnerVolumeSpecName "kube-api-access-vlq92". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.962160 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1c367f44-edea-4453-8219-0ec010ee30bb" (UID: "1c367f44-edea-4453-8219-0ec010ee30bb"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.970965 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.970996 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1c367f44-edea-4453-8219-0ec010ee30bb-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:04:12 crc kubenswrapper[5102]: I0123 08:04:12.971007 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlq92\" (UniqueName: \"kubernetes.io/projected/1c367f44-edea-4453-8219-0ec010ee30bb-kube-api-access-vlq92\") on node \"crc\" DevicePath \"\"" Jan 23 08:04:13 crc kubenswrapper[5102]: I0123 08:04:13.154069 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-2srpq"] Jan 23 08:04:13 crc kubenswrapper[5102]: I0123 08:04:13.164306 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-2srpq"] Jan 23 08:04:13 crc kubenswrapper[5102]: I0123 08:04:13.606766 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" path="/var/lib/kubelet/pods/1c367f44-edea-4453-8219-0ec010ee30bb/volumes" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.652856 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653629 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="extract-utilities" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653648 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="extract-utilities" Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653663 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="extract-content" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653671 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="extract-content" Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653713 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="registry-server" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653721 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="registry-server" Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653735 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="registry-server" Jan 23 08:05:20 crc 
kubenswrapper[5102]: I0123 08:05:20.653742 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="registry-server" Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653754 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="extract-utilities" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653764 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="extract-utilities" Jan 23 08:05:20 crc kubenswrapper[5102]: E0123 08:05:20.653773 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="extract-content" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653780 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="extract-content" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653946 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="526a6267-0021-4287-a936-56aeef729c59" containerName="registry-server" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.653963 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="1c367f44-edea-4453-8219-0ec010ee30bb" containerName="registry-server" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.655479 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.678679 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.745834 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.745908 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7z48w\" (UniqueName: \"kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.746102 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.846674 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.846774 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.846826 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7z48w\" (UniqueName: \"kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.847161 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.847244 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.871326 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7z48w\" (UniqueName: \"kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w\") pod \"redhat-operators-nnk6b\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:20 crc kubenswrapper[5102]: I0123 08:05:20.982481 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:21 crc kubenswrapper[5102]: I0123 08:05:21.442775 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:22 crc kubenswrapper[5102]: I0123 08:05:22.405180 5102 generic.go:334] "Generic (PLEG): container finished" podID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerID="95b1b022f5578f8329a964ffcd6adc25c02bb2725f87a9d708e742ab6650894d" exitCode=0 Jan 23 08:05:22 crc kubenswrapper[5102]: I0123 08:05:22.405255 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerDied","Data":"95b1b022f5578f8329a964ffcd6adc25c02bb2725f87a9d708e742ab6650894d"} Jan 23 08:05:22 crc kubenswrapper[5102]: I0123 08:05:22.405447 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerStarted","Data":"bcfe29c12f6436f54d236fc676a097c25aa9bd1ff4b45716b377f97336a21a4a"} Jan 23 08:05:23 crc kubenswrapper[5102]: I0123 08:05:23.421611 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerStarted","Data":"ded4ac1a935437e86cb0c3cc572f6eea732b3632de8074a154ca81bc575ec8da"} Jan 23 08:05:24 crc kubenswrapper[5102]: I0123 08:05:24.429404 5102 generic.go:334] "Generic (PLEG): container finished" podID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerID="ded4ac1a935437e86cb0c3cc572f6eea732b3632de8074a154ca81bc575ec8da" exitCode=0 Jan 23 08:05:24 crc kubenswrapper[5102]: I0123 08:05:24.430031 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerDied","Data":"ded4ac1a935437e86cb0c3cc572f6eea732b3632de8074a154ca81bc575ec8da"} Jan 23 08:05:25 crc kubenswrapper[5102]: I0123 08:05:25.444292 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerStarted","Data":"dbb7bec8d5e7184304d72aa869c884e465edfc15285d81889659558aef2bb72e"} Jan 23 08:05:25 crc kubenswrapper[5102]: I0123 08:05:25.466206 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-nnk6b" podStartSLOduration=2.930453494 podStartE2EDuration="5.466189331s" podCreationTimestamp="2026-01-23 08:05:20 +0000 UTC" firstStartedPulling="2026-01-23 08:05:22.407176653 +0000 UTC m=+4273.227525638" lastFinishedPulling="2026-01-23 08:05:24.9429125 +0000 UTC m=+4275.763261475" observedRunningTime="2026-01-23 08:05:25.464189749 +0000 UTC m=+4276.284538744" watchObservedRunningTime="2026-01-23 08:05:25.466189331 +0000 UTC m=+4276.286538306" Jan 23 08:05:30 crc kubenswrapper[5102]: I0123 08:05:30.983337 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:30 crc kubenswrapper[5102]: I0123 08:05:30.983402 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:32 crc kubenswrapper[5102]: I0123 08:05:32.041644 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-nnk6b" 
podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="registry-server" probeResult="failure" output=< Jan 23 08:05:32 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 08:05:32 crc kubenswrapper[5102]: > Jan 23 08:05:41 crc kubenswrapper[5102]: I0123 08:05:41.024053 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:41 crc kubenswrapper[5102]: I0123 08:05:41.086124 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:41 crc kubenswrapper[5102]: I0123 08:05:41.261503 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:42 crc kubenswrapper[5102]: I0123 08:05:42.573233 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-nnk6b" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="registry-server" containerID="cri-o://dbb7bec8d5e7184304d72aa869c884e465edfc15285d81889659558aef2bb72e" gracePeriod=2 Jan 23 08:05:43 crc kubenswrapper[5102]: I0123 08:05:43.582464 5102 generic.go:334] "Generic (PLEG): container finished" podID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerID="dbb7bec8d5e7184304d72aa869c884e465edfc15285d81889659558aef2bb72e" exitCode=0 Jan 23 08:05:43 crc kubenswrapper[5102]: I0123 08:05:43.582600 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerDied","Data":"dbb7bec8d5e7184304d72aa869c884e465edfc15285d81889659558aef2bb72e"} Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.053328 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.224043 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities\") pod \"da4676ff-f31b-493d-8cb7-4bc272def04e\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.224102 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content\") pod \"da4676ff-f31b-493d-8cb7-4bc272def04e\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.224158 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7z48w\" (UniqueName: \"kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w\") pod \"da4676ff-f31b-493d-8cb7-4bc272def04e\" (UID: \"da4676ff-f31b-493d-8cb7-4bc272def04e\") " Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.225406 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities" (OuterVolumeSpecName: "utilities") pod "da4676ff-f31b-493d-8cb7-4bc272def04e" (UID: "da4676ff-f31b-493d-8cb7-4bc272def04e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.230136 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w" (OuterVolumeSpecName: "kube-api-access-7z48w") pod "da4676ff-f31b-493d-8cb7-4bc272def04e" (UID: "da4676ff-f31b-493d-8cb7-4bc272def04e"). InnerVolumeSpecName "kube-api-access-7z48w". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.325811 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7z48w\" (UniqueName: \"kubernetes.io/projected/da4676ff-f31b-493d-8cb7-4bc272def04e-kube-api-access-7z48w\") on node \"crc\" DevicePath \"\"" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.325870 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.391583 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "da4676ff-f31b-493d-8cb7-4bc272def04e" (UID: "da4676ff-f31b-493d-8cb7-4bc272def04e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.427356 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/da4676ff-f31b-493d-8cb7-4bc272def04e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.592319 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-nnk6b" event={"ID":"da4676ff-f31b-493d-8cb7-4bc272def04e","Type":"ContainerDied","Data":"bcfe29c12f6436f54d236fc676a097c25aa9bd1ff4b45716b377f97336a21a4a"} Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.592364 5102 scope.go:117] "RemoveContainer" containerID="dbb7bec8d5e7184304d72aa869c884e465edfc15285d81889659558aef2bb72e" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.592480 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-nnk6b" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.624700 5102 scope.go:117] "RemoveContainer" containerID="ded4ac1a935437e86cb0c3cc572f6eea732b3632de8074a154ca81bc575ec8da" Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.646219 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.656596 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-nnk6b"] Jan 23 08:05:44 crc kubenswrapper[5102]: I0123 08:05:44.677526 5102 scope.go:117] "RemoveContainer" containerID="95b1b022f5578f8329a964ffcd6adc25c02bb2725f87a9d708e742ab6650894d" Jan 23 08:05:45 crc kubenswrapper[5102]: I0123 08:05:45.609677 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" path="/var/lib/kubelet/pods/da4676ff-f31b-493d-8cb7-4bc272def04e/volumes" Jan 23 08:05:46 crc kubenswrapper[5102]: I0123 08:05:46.768052 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:05:46 crc kubenswrapper[5102]: I0123 08:05:46.768178 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:06:16 crc kubenswrapper[5102]: I0123 08:06:16.768644 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:06:16 crc kubenswrapper[5102]: I0123 08:06:16.769619 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:06:46 crc kubenswrapper[5102]: I0123 08:06:46.768960 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:06:46 crc kubenswrapper[5102]: I0123 08:06:46.769500 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:06:46 crc kubenswrapper[5102]: I0123 08:06:46.769591 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:06:46 crc kubenswrapper[5102]: I0123 08:06:46.770206 5102 
kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:06:46 crc kubenswrapper[5102]: I0123 08:06:46.770266 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993" gracePeriod=600 Jan 23 08:06:47 crc kubenswrapper[5102]: I0123 08:06:47.073522 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993" exitCode=0 Jan 23 08:06:47 crc kubenswrapper[5102]: I0123 08:06:47.073568 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993"} Jan 23 08:06:47 crc kubenswrapper[5102]: I0123 08:06:47.074005 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"} Jan 23 08:06:47 crc kubenswrapper[5102]: I0123 08:06:47.074036 5102 scope.go:117] "RemoveContainer" containerID="d750796a4cf1dc5ca7e0660866d96b74890e6d40f8c80b523fa3405f0fb700b1" Jan 23 08:09:16 crc kubenswrapper[5102]: I0123 08:09:16.768635 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:09:16 crc kubenswrapper[5102]: I0123 08:09:16.769142 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:09:46 crc kubenswrapper[5102]: I0123 08:09:46.768122 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:09:46 crc kubenswrapper[5102]: I0123 08:09:46.768741 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:10:16 crc kubenswrapper[5102]: I0123 08:10:16.768363 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon 
namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:10:16 crc kubenswrapper[5102]: I0123 08:10:16.769100 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:10:16 crc kubenswrapper[5102]: I0123 08:10:16.769165 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:10:16 crc kubenswrapper[5102]: I0123 08:10:16.770326 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:10:16 crc kubenswrapper[5102]: I0123 08:10:16.770395 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" gracePeriod=600 Jan 23 08:10:17 crc kubenswrapper[5102]: E0123 08:10:17.406611 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:10:17 crc kubenswrapper[5102]: I0123 08:10:17.673635 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" exitCode=0 Jan 23 08:10:17 crc kubenswrapper[5102]: I0123 08:10:17.673687 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"} Jan 23 08:10:17 crc kubenswrapper[5102]: I0123 08:10:17.673733 5102 scope.go:117] "RemoveContainer" containerID="f29859ad4dc3279d392ad03a81703e8f7bc70e2cf416c7b044fcf441781bf993" Jan 23 08:10:17 crc kubenswrapper[5102]: I0123 08:10:17.674394 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:10:17 crc kubenswrapper[5102]: E0123 08:10:17.674716 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" 
Jan 23 08:10:30 crc kubenswrapper[5102]: I0123 08:10:30.597888 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:10:30 crc kubenswrapper[5102]: E0123 08:10:30.598520 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:10:41 crc kubenswrapper[5102]: I0123 08:10:41.597865 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:10:41 crc kubenswrapper[5102]: E0123 08:10:41.598519 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:10:52 crc kubenswrapper[5102]: I0123 08:10:52.597954 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:10:52 crc kubenswrapper[5102]: E0123 08:10:52.598859 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:11:06 crc kubenswrapper[5102]: I0123 08:11:06.598240 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:11:06 crc kubenswrapper[5102]: E0123 08:11:06.599182 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:11:21 crc kubenswrapper[5102]: I0123 08:11:21.598291 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:11:21 crc kubenswrapper[5102]: E0123 08:11:21.599053 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:11:36 crc kubenswrapper[5102]: I0123 08:11:36.598274 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:11:36 crc kubenswrapper[5102]: E0123 08:11:36.599669 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:11:48 crc kubenswrapper[5102]: I0123 08:11:48.599051 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:11:48 crc kubenswrapper[5102]: E0123 08:11:48.600133 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:12:03 crc kubenswrapper[5102]: I0123 08:12:03.598968 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:12:03 crc kubenswrapper[5102]: E0123 08:12:03.600820 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:12:17 crc kubenswrapper[5102]: I0123 08:12:17.598373 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:12:17 crc kubenswrapper[5102]: E0123 08:12:17.599413 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:12:32 crc kubenswrapper[5102]: I0123 08:12:32.598682 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:12:32 crc kubenswrapper[5102]: E0123 08:12:32.599716 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:12:46 crc kubenswrapper[5102]: I0123 08:12:46.599127 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:12:46 crc kubenswrapper[5102]: E0123 08:12:46.601034 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:12:59 crc kubenswrapper[5102]: I0123 08:12:59.602831 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:12:59 crc kubenswrapper[5102]: E0123 08:12:59.603711 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:13:13 crc kubenswrapper[5102]: I0123 08:13:13.601238 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:13:13 crc kubenswrapper[5102]: E0123 08:13:13.601973 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:13:26 crc kubenswrapper[5102]: I0123 08:13:26.598786 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:13:26 crc kubenswrapper[5102]: E0123 08:13:26.599536 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:13:40 crc kubenswrapper[5102]: I0123 08:13:40.598616 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:13:40 crc kubenswrapper[5102]: E0123 08:13:40.599281 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:13:51 crc kubenswrapper[5102]: I0123 08:13:51.598143 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:13:51 crc kubenswrapper[5102]: E0123 08:13:51.598921 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:14:03 crc kubenswrapper[5102]: I0123 08:14:03.598904 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a"
Jan 23 08:14:03 crc kubenswrapper[5102]: E0123 08:14:03.601244 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.247379 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-krk7m"]
Jan 23 08:14:05 crc kubenswrapper[5102]: E0123 08:14:05.247723 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="registry-server"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.247738 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="registry-server"
Jan 23 08:14:05 crc kubenswrapper[5102]: E0123 08:14:05.247758 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="extract-utilities"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.247767 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="extract-utilities"
Jan 23 08:14:05 crc kubenswrapper[5102]: E0123 08:14:05.247775 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="extract-content"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.247783 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="extract-content"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.247955 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="da4676ff-f31b-493d-8cb7-4bc272def04e" containerName="registry-server"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.249288 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.265501 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-krk7m"]
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.276991 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.277053 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.277122 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqlr9\" (UniqueName: \"kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.378141 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.378480 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.378627 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.378773 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqlr9\" (UniqueName: \"kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.378972 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m"
Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.397265 5102 operation_generator.go:637]
"MountVolume.SetUp succeeded for volume \"kube-api-access-rqlr9\" (UniqueName: \"kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9\") pod \"community-operators-krk7m\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:05 crc kubenswrapper[5102]: I0123 08:14:05.574796 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:06 crc kubenswrapper[5102]: I0123 08:14:06.073103 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-krk7m"] Jan 23 08:14:06 crc kubenswrapper[5102]: I0123 08:14:06.479277 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerStarted","Data":"4bdf54ce9b927ecb8d79e2d946860f8ab15e9c84a170e95450d4f669a5fe200a"} Jan 23 08:14:07 crc kubenswrapper[5102]: I0123 08:14:07.486237 5102 generic.go:334] "Generic (PLEG): container finished" podID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerID="f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7" exitCode=0 Jan 23 08:14:07 crc kubenswrapper[5102]: I0123 08:14:07.486439 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerDied","Data":"f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7"} Jan 23 08:14:07 crc kubenswrapper[5102]: I0123 08:14:07.487943 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 08:14:09 crc kubenswrapper[5102]: I0123 08:14:09.500718 5102 generic.go:334] "Generic (PLEG): container finished" podID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerID="9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e" exitCode=0 Jan 23 08:14:09 crc kubenswrapper[5102]: I0123 08:14:09.500781 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerDied","Data":"9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e"} Jan 23 08:14:10 crc kubenswrapper[5102]: I0123 08:14:10.510412 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerStarted","Data":"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb"} Jan 23 08:14:10 crc kubenswrapper[5102]: I0123 08:14:10.574576 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-krk7m" podStartSLOduration=2.875133126 podStartE2EDuration="5.574557333s" podCreationTimestamp="2026-01-23 08:14:05 +0000 UTC" firstStartedPulling="2026-01-23 08:14:07.487632848 +0000 UTC m=+4798.307981823" lastFinishedPulling="2026-01-23 08:14:10.187057055 +0000 UTC m=+4801.007406030" observedRunningTime="2026-01-23 08:14:10.568839676 +0000 UTC m=+4801.389188701" watchObservedRunningTime="2026-01-23 08:14:10.574557333 +0000 UTC m=+4801.394906308" Jan 23 08:14:14 crc kubenswrapper[5102]: I0123 08:14:14.597722 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:14:14 crc kubenswrapper[5102]: E0123 08:14:14.598213 5102 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:14:15 crc kubenswrapper[5102]: I0123 08:14:15.575659 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:15 crc kubenswrapper[5102]: I0123 08:14:15.577812 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:15 crc kubenswrapper[5102]: I0123 08:14:15.669980 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:16 crc kubenswrapper[5102]: I0123 08:14:16.593687 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:16 crc kubenswrapper[5102]: I0123 08:14:16.643680 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-krk7m"] Jan 23 08:14:18 crc kubenswrapper[5102]: I0123 08:14:18.573096 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-krk7m" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="registry-server" containerID="cri-o://6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb" gracePeriod=2 Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.027840 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.180278 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content\") pod \"500b7e95-a908-4882-bc7c-05a0a16c660e\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.180340 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqlr9\" (UniqueName: \"kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9\") pod \"500b7e95-a908-4882-bc7c-05a0a16c660e\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.180457 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities\") pod \"500b7e95-a908-4882-bc7c-05a0a16c660e\" (UID: \"500b7e95-a908-4882-bc7c-05a0a16c660e\") " Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.181737 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities" (OuterVolumeSpecName: "utilities") pod "500b7e95-a908-4882-bc7c-05a0a16c660e" (UID: "500b7e95-a908-4882-bc7c-05a0a16c660e"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.188856 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9" (OuterVolumeSpecName: "kube-api-access-rqlr9") pod "500b7e95-a908-4882-bc7c-05a0a16c660e" (UID: "500b7e95-a908-4882-bc7c-05a0a16c660e"). InnerVolumeSpecName "kube-api-access-rqlr9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.281934 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqlr9\" (UniqueName: \"kubernetes.io/projected/500b7e95-a908-4882-bc7c-05a0a16c660e-kube-api-access-rqlr9\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.281987 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.308596 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "500b7e95-a908-4882-bc7c-05a0a16c660e" (UID: "500b7e95-a908-4882-bc7c-05a0a16c660e"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.383292 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/500b7e95-a908-4882-bc7c-05a0a16c660e-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.583618 5102 generic.go:334] "Generic (PLEG): container finished" podID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerID="6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb" exitCode=0 Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.583665 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerDied","Data":"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb"} Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.583692 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-krk7m" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.583712 5102 scope.go:117] "RemoveContainer" containerID="6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.583697 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-krk7m" event={"ID":"500b7e95-a908-4882-bc7c-05a0a16c660e","Type":"ContainerDied","Data":"4bdf54ce9b927ecb8d79e2d946860f8ab15e9c84a170e95450d4f669a5fe200a"} Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.612440 5102 scope.go:117] "RemoveContainer" containerID="9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.670813 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-krk7m"] Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.674329 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-krk7m"] Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.675107 5102 scope.go:117] "RemoveContainer" containerID="f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.695274 5102 scope.go:117] "RemoveContainer" containerID="6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb" Jan 23 08:14:19 crc kubenswrapper[5102]: E0123 08:14:19.695959 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb\": container with ID starting with 6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb not found: ID does not exist" containerID="6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.696006 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb"} err="failed to get container status \"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb\": rpc error: code = NotFound desc = could not find container \"6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb\": container with ID starting with 6d3e37d36bccb3dc3d3778cd405f7cdaa9378abbc11e69a57d89a0202d4a06eb not found: ID does not exist" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.696065 5102 scope.go:117] "RemoveContainer" containerID="9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e" Jan 23 08:14:19 crc kubenswrapper[5102]: E0123 08:14:19.696885 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e\": container with ID starting with 9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e not found: ID does not exist" containerID="9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.696928 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e"} err="failed to get container status \"9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e\": rpc error: code = NotFound desc = could not find 
container \"9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e\": container with ID starting with 9d17f2a4d5e6304aec32537ecd1bb99201c019675d740b8bf97fc0be7d52686e not found: ID does not exist" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.696957 5102 scope.go:117] "RemoveContainer" containerID="f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7" Jan 23 08:14:19 crc kubenswrapper[5102]: E0123 08:14:19.697236 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7\": container with ID starting with f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7 not found: ID does not exist" containerID="f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7" Jan 23 08:14:19 crc kubenswrapper[5102]: I0123 08:14:19.697254 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7"} err="failed to get container status \"f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7\": rpc error: code = NotFound desc = could not find container \"f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7\": container with ID starting with f3ddb1f61107a005b9148651b8de8b77352ecf76c50e1502fab393dd83e9fdf7 not found: ID does not exist" Jan 23 08:14:21 crc kubenswrapper[5102]: I0123 08:14:21.606201 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" path="/var/lib/kubelet/pods/500b7e95-a908-4882-bc7c-05a0a16c660e/volumes" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.920865 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:23 crc kubenswrapper[5102]: E0123 08:14:23.921508 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="registry-server" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.921523 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="registry-server" Jan 23 08:14:23 crc kubenswrapper[5102]: E0123 08:14:23.921561 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="extract-utilities" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.921568 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="extract-utilities" Jan 23 08:14:23 crc kubenswrapper[5102]: E0123 08:14:23.921593 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="extract-content" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.921603 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="extract-content" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.921744 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="500b7e95-a908-4882-bc7c-05a0a16c660e" containerName="registry-server" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.922753 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.944110 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.951488 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tcqvg\" (UniqueName: \"kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.951561 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:23 crc kubenswrapper[5102]: I0123 08:14:23.951624 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.052236 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tcqvg\" (UniqueName: \"kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.052302 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.052353 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.053003 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.053045 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.078261 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tcqvg\" (UniqueName: \"kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg\") pod \"certified-operators-nlntv\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.274469 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:24 crc kubenswrapper[5102]: I0123 08:14:24.729441 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:25 crc kubenswrapper[5102]: I0123 08:14:25.632360 5102 generic.go:334] "Generic (PLEG): container finished" podID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerID="a65756d5bb68e2b221299eec2e5207083445194c807c29887afb1a997a0f6a26" exitCode=0 Jan 23 08:14:25 crc kubenswrapper[5102]: I0123 08:14:25.632435 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerDied","Data":"a65756d5bb68e2b221299eec2e5207083445194c807c29887afb1a997a0f6a26"} Jan 23 08:14:25 crc kubenswrapper[5102]: I0123 08:14:25.632735 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerStarted","Data":"0091c924560350e4eefadeaa845950749443b45df195a55527c59ec952f92084"} Jan 23 08:14:26 crc kubenswrapper[5102]: I0123 08:14:26.640027 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerStarted","Data":"d377a0351db2129235e43183ec8331259eec7acc991c36dfa46426e7c6994c32"} Jan 23 08:14:27 crc kubenswrapper[5102]: I0123 08:14:27.648178 5102 generic.go:334] "Generic (PLEG): container finished" podID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerID="d377a0351db2129235e43183ec8331259eec7acc991c36dfa46426e7c6994c32" exitCode=0 Jan 23 08:14:27 crc kubenswrapper[5102]: I0123 08:14:27.648223 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerDied","Data":"d377a0351db2129235e43183ec8331259eec7acc991c36dfa46426e7c6994c32"} Jan 23 08:14:28 crc kubenswrapper[5102]: I0123 08:14:28.659412 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerStarted","Data":"f97d2da97e8a381104df3fd69a9ece1fcf08d6bb35363091627b4d3489843f21"} Jan 23 08:14:28 crc kubenswrapper[5102]: I0123 08:14:28.684817 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-nlntv" podStartSLOduration=3.182756348 podStartE2EDuration="5.684788805s" podCreationTimestamp="2026-01-23 08:14:23 +0000 UTC" firstStartedPulling="2026-01-23 08:14:25.634308517 +0000 UTC m=+4816.454657532" lastFinishedPulling="2026-01-23 08:14:28.136341004 +0000 UTC m=+4818.956689989" observedRunningTime="2026-01-23 08:14:28.675684123 +0000 UTC m=+4819.496033128" watchObservedRunningTime="2026-01-23 08:14:28.684788805 +0000 UTC m=+4819.505137820" Jan 23 08:14:29 crc kubenswrapper[5102]: I0123 08:14:29.604232 5102 scope.go:117] "RemoveContainer" 
containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:14:29 crc kubenswrapper[5102]: E0123 08:14:29.604503 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.497828 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.499942 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.509381 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.546957 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.547032 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.547062 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-7fjps\" (UniqueName: \"kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.647829 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.648110 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.648171 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-7fjps\" (UniqueName: \"kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc 
kubenswrapper[5102]: I0123 08:14:30.648235 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.648636 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.667035 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-7fjps\" (UniqueName: \"kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps\") pod \"redhat-marketplace-v86tn\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:30 crc kubenswrapper[5102]: I0123 08:14:30.833446 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:31 crc kubenswrapper[5102]: I0123 08:14:31.342609 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:31 crc kubenswrapper[5102]: W0123 08:14:31.349942 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod351612af_6dc5_4567_93be_7788a09b59e0.slice/crio-b6f551b4632244fbffb3f6d535ab611e7f1bd559b537c884b597f709d31092cf WatchSource:0}: Error finding container b6f551b4632244fbffb3f6d535ab611e7f1bd559b537c884b597f709d31092cf: Status 404 returned error can't find the container with id b6f551b4632244fbffb3f6d535ab611e7f1bd559b537c884b597f709d31092cf Jan 23 08:14:31 crc kubenswrapper[5102]: I0123 08:14:31.682348 5102 generic.go:334] "Generic (PLEG): container finished" podID="351612af-6dc5-4567-93be-7788a09b59e0" containerID="afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8" exitCode=0 Jan 23 08:14:31 crc kubenswrapper[5102]: I0123 08:14:31.682388 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerDied","Data":"afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8"} Jan 23 08:14:31 crc kubenswrapper[5102]: I0123 08:14:31.682415 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerStarted","Data":"b6f551b4632244fbffb3f6d535ab611e7f1bd559b537c884b597f709d31092cf"} Jan 23 08:14:33 crc kubenswrapper[5102]: I0123 08:14:33.698610 5102 generic.go:334] "Generic (PLEG): container finished" podID="351612af-6dc5-4567-93be-7788a09b59e0" containerID="11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80" exitCode=0 Jan 23 08:14:33 crc kubenswrapper[5102]: I0123 08:14:33.698691 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerDied","Data":"11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80"} Jan 23 08:14:34 crc 
kubenswrapper[5102]: I0123 08:14:34.275040 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:34 crc kubenswrapper[5102]: I0123 08:14:34.275085 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:34 crc kubenswrapper[5102]: I0123 08:14:34.319859 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:34 crc kubenswrapper[5102]: I0123 08:14:34.710346 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerStarted","Data":"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b"} Jan 23 08:14:34 crc kubenswrapper[5102]: I0123 08:14:34.735108 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-v86tn" podStartSLOduration=2.269997499 podStartE2EDuration="4.735085461s" podCreationTimestamp="2026-01-23 08:14:30 +0000 UTC" firstStartedPulling="2026-01-23 08:14:31.685842942 +0000 UTC m=+4822.506191927" lastFinishedPulling="2026-01-23 08:14:34.150930914 +0000 UTC m=+4824.971279889" observedRunningTime="2026-01-23 08:14:34.724876334 +0000 UTC m=+4825.545225309" watchObservedRunningTime="2026-01-23 08:14:34.735085461 +0000 UTC m=+4825.555434436" Jan 23 08:14:34 crc kubenswrapper[5102]: I0123 08:14:34.759815 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:36 crc kubenswrapper[5102]: I0123 08:14:36.886875 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:37 crc kubenswrapper[5102]: I0123 08:14:37.735007 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-nlntv" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="registry-server" containerID="cri-o://f97d2da97e8a381104df3fd69a9ece1fcf08d6bb35363091627b4d3489843f21" gracePeriod=2 Jan 23 08:14:38 crc kubenswrapper[5102]: I0123 08:14:38.743060 5102 generic.go:334] "Generic (PLEG): container finished" podID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerID="f97d2da97e8a381104df3fd69a9ece1fcf08d6bb35363091627b4d3489843f21" exitCode=0 Jan 23 08:14:38 crc kubenswrapper[5102]: I0123 08:14:38.743127 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerDied","Data":"f97d2da97e8a381104df3fd69a9ece1fcf08d6bb35363091627b4d3489843f21"} Jan 23 08:14:39 crc kubenswrapper[5102]: I0123 08:14:39.965160 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.126338 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tcqvg\" (UniqueName: \"kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg\") pod \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.126444 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities\") pod \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.126515 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content\") pod \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\" (UID: \"4b837a6d-aee4-45ba-85db-2be0d920c0b9\") " Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.127825 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities" (OuterVolumeSpecName: "utilities") pod "4b837a6d-aee4-45ba-85db-2be0d920c0b9" (UID: "4b837a6d-aee4-45ba-85db-2be0d920c0b9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.133235 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg" (OuterVolumeSpecName: "kube-api-access-tcqvg") pod "4b837a6d-aee4-45ba-85db-2be0d920c0b9" (UID: "4b837a6d-aee4-45ba-85db-2be0d920c0b9"). InnerVolumeSpecName "kube-api-access-tcqvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.182309 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4b837a6d-aee4-45ba-85db-2be0d920c0b9" (UID: "4b837a6d-aee4-45ba-85db-2be0d920c0b9"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.228588 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.229802 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4b837a6d-aee4-45ba-85db-2be0d920c0b9-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.229931 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tcqvg\" (UniqueName: \"kubernetes.io/projected/4b837a6d-aee4-45ba-85db-2be0d920c0b9-kube-api-access-tcqvg\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.599358 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:14:40 crc kubenswrapper[5102]: E0123 08:14:40.599758 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.761668 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-nlntv" event={"ID":"4b837a6d-aee4-45ba-85db-2be0d920c0b9","Type":"ContainerDied","Data":"0091c924560350e4eefadeaa845950749443b45df195a55527c59ec952f92084"} Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.761734 5102 scope.go:117] "RemoveContainer" containerID="f97d2da97e8a381104df3fd69a9ece1fcf08d6bb35363091627b4d3489843f21" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.761729 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-nlntv" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.783069 5102 scope.go:117] "RemoveContainer" containerID="d377a0351db2129235e43183ec8331259eec7acc991c36dfa46426e7c6994c32" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.800327 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.803108 5102 scope.go:117] "RemoveContainer" containerID="a65756d5bb68e2b221299eec2e5207083445194c807c29887afb1a997a0f6a26" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.813112 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-nlntv"] Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.834052 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.834347 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:40 crc kubenswrapper[5102]: I0123 08:14:40.877969 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:41 crc kubenswrapper[5102]: I0123 08:14:41.608588 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" path="/var/lib/kubelet/pods/4b837a6d-aee4-45ba-85db-2be0d920c0b9/volumes" Jan 23 08:14:41 crc kubenswrapper[5102]: I0123 08:14:41.815180 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:43 crc kubenswrapper[5102]: I0123 08:14:43.285526 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:44 crc kubenswrapper[5102]: I0123 08:14:44.787587 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-v86tn" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="registry-server" containerID="cri-o://196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b" gracePeriod=2 Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.787495 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.799916 5102 generic.go:334] "Generic (PLEG): container finished" podID="351612af-6dc5-4567-93be-7788a09b59e0" containerID="196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b" exitCode=0 Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.799948 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-v86tn" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.799955 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerDied","Data":"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b"} Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.800006 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-v86tn" event={"ID":"351612af-6dc5-4567-93be-7788a09b59e0","Type":"ContainerDied","Data":"b6f551b4632244fbffb3f6d535ab611e7f1bd559b537c884b597f709d31092cf"} Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.800029 5102 scope.go:117] "RemoveContainer" containerID="196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.828291 5102 scope.go:117] "RemoveContainer" containerID="11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.844281 5102 scope.go:117] "RemoveContainer" containerID="afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.870174 5102 scope.go:117] "RemoveContainer" containerID="196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b" Jan 23 08:14:45 crc kubenswrapper[5102]: E0123 08:14:45.871417 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b\": container with ID starting with 196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b not found: ID does not exist" containerID="196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.871495 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b"} err="failed to get container status \"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b\": rpc error: code = NotFound desc = could not find container \"196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b\": container with ID starting with 196096a94067256d0ae958b4e4439f15027edfbb7a96d99f576c2514432d389b not found: ID does not exist" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.871555 5102 scope.go:117] "RemoveContainer" containerID="11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80" Jan 23 08:14:45 crc kubenswrapper[5102]: E0123 08:14:45.872114 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80\": container with ID starting with 11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80 not found: ID does not exist" containerID="11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.872148 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80"} err="failed to get container status \"11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80\": rpc error: code = NotFound desc = could not find container 
\"11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80\": container with ID starting with 11bcd1227331af063a1be11bb38a60ddf57332b3150bd78f2bfa1433ae863b80 not found: ID does not exist" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.872176 5102 scope.go:117] "RemoveContainer" containerID="afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8" Jan 23 08:14:45 crc kubenswrapper[5102]: E0123 08:14:45.872472 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8\": container with ID starting with afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8 not found: ID does not exist" containerID="afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.872515 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8"} err="failed to get container status \"afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8\": rpc error: code = NotFound desc = could not find container \"afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8\": container with ID starting with afcb30f1299e84b1c575052462af32bac146a34ac0843cd9ca7327ec254f6ad8 not found: ID does not exist" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.915949 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7fjps\" (UniqueName: \"kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps\") pod \"351612af-6dc5-4567-93be-7788a09b59e0\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.916154 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content\") pod \"351612af-6dc5-4567-93be-7788a09b59e0\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.916179 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities\") pod \"351612af-6dc5-4567-93be-7788a09b59e0\" (UID: \"351612af-6dc5-4567-93be-7788a09b59e0\") " Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.917101 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities" (OuterVolumeSpecName: "utilities") pod "351612af-6dc5-4567-93be-7788a09b59e0" (UID: "351612af-6dc5-4567-93be-7788a09b59e0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.921506 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps" (OuterVolumeSpecName: "kube-api-access-7fjps") pod "351612af-6dc5-4567-93be-7788a09b59e0" (UID: "351612af-6dc5-4567-93be-7788a09b59e0"). InnerVolumeSpecName "kube-api-access-7fjps". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:14:45 crc kubenswrapper[5102]: I0123 08:14:45.942897 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "351612af-6dc5-4567-93be-7788a09b59e0" (UID: "351612af-6dc5-4567-93be-7788a09b59e0"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:14:46 crc kubenswrapper[5102]: I0123 08:14:46.018248 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7fjps\" (UniqueName: \"kubernetes.io/projected/351612af-6dc5-4567-93be-7788a09b59e0-kube-api-access-7fjps\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:46 crc kubenswrapper[5102]: I0123 08:14:46.018559 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:46 crc kubenswrapper[5102]: I0123 08:14:46.018575 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/351612af-6dc5-4567-93be-7788a09b59e0-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:14:46 crc kubenswrapper[5102]: I0123 08:14:46.131690 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:46 crc kubenswrapper[5102]: I0123 08:14:46.137660 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-v86tn"] Jan 23 08:14:47 crc kubenswrapper[5102]: I0123 08:14:47.608468 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="351612af-6dc5-4567-93be-7788a09b59e0" path="/var/lib/kubelet/pods/351612af-6dc5-4567-93be-7788a09b59e0/volumes" Jan 23 08:14:51 crc kubenswrapper[5102]: I0123 08:14:51.599026 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:14:51 crc kubenswrapper[5102]: E0123 08:14:51.600317 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.161457 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f"] Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162359 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162378 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162405 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="extract-content" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162413 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" 
containerName="extract-content" Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162428 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="extract-content" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162436 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="extract-content" Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162445 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162453 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162466 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="extract-utilities" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162474 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="extract-utilities" Jan 23 08:15:00 crc kubenswrapper[5102]: E0123 08:15:00.162500 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="extract-utilities" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162510 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="extract-utilities" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162698 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b837a6d-aee4-45ba-85db-2be0d920c0b9" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.162716 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="351612af-6dc5-4567-93be-7788a09b59e0" containerName="registry-server" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.163247 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.168463 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.171754 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.171831 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f"] Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.222131 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.222188 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-29x8x\" (UniqueName: \"kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.222524 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.323908 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.323982 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.324013 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-29x8x\" (UniqueName: \"kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.325986 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume\") pod 
\"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.331449 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.344978 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-29x8x\" (UniqueName: \"kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x\") pod \"collect-profiles-29485935-hzq6f\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.485927 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:00 crc kubenswrapper[5102]: I0123 08:15:00.921814 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f"] Jan 23 08:15:01 crc kubenswrapper[5102]: I0123 08:15:01.953075 5102 generic.go:334] "Generic (PLEG): container finished" podID="5f3576b6-9001-41bf-986c-2389cc0ac574" containerID="bc252338db75d6ccff3fc3121fea70650b00195f84e4d6a8b79ed0b7affc72a6" exitCode=0 Jan 23 08:15:01 crc kubenswrapper[5102]: I0123 08:15:01.953239 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" event={"ID":"5f3576b6-9001-41bf-986c-2389cc0ac574","Type":"ContainerDied","Data":"bc252338db75d6ccff3fc3121fea70650b00195f84e4d6a8b79ed0b7affc72a6"} Jan 23 08:15:01 crc kubenswrapper[5102]: I0123 08:15:01.954305 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" event={"ID":"5f3576b6-9001-41bf-986c-2389cc0ac574","Type":"ContainerStarted","Data":"277f1d153bd1f940c043ebff26e793edbe8b7b9e6a774db65f9641f9c1f68c58"} Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.238079 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.368294 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume\") pod \"5f3576b6-9001-41bf-986c-2389cc0ac574\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.368385 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume\") pod \"5f3576b6-9001-41bf-986c-2389cc0ac574\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.368590 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-29x8x\" (UniqueName: \"kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x\") pod \"5f3576b6-9001-41bf-986c-2389cc0ac574\" (UID: \"5f3576b6-9001-41bf-986c-2389cc0ac574\") " Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.369263 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume" (OuterVolumeSpecName: "config-volume") pod "5f3576b6-9001-41bf-986c-2389cc0ac574" (UID: "5f3576b6-9001-41bf-986c-2389cc0ac574"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.374046 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "5f3576b6-9001-41bf-986c-2389cc0ac574" (UID: "5f3576b6-9001-41bf-986c-2389cc0ac574"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.375763 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x" (OuterVolumeSpecName: "kube-api-access-29x8x") pod "5f3576b6-9001-41bf-986c-2389cc0ac574" (UID: "5f3576b6-9001-41bf-986c-2389cc0ac574"). InnerVolumeSpecName "kube-api-access-29x8x". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.469989 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/5f3576b6-9001-41bf-986c-2389cc0ac574-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.470023 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/5f3576b6-9001-41bf-986c-2389cc0ac574-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.470034 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-29x8x\" (UniqueName: \"kubernetes.io/projected/5f3576b6-9001-41bf-986c-2389cc0ac574-kube-api-access-29x8x\") on node \"crc\" DevicePath \"\"" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.972251 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" event={"ID":"5f3576b6-9001-41bf-986c-2389cc0ac574","Type":"ContainerDied","Data":"277f1d153bd1f940c043ebff26e793edbe8b7b9e6a774db65f9641f9c1f68c58"} Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.972332 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="277f1d153bd1f940c043ebff26e793edbe8b7b9e6a774db65f9641f9c1f68c58" Jan 23 08:15:03 crc kubenswrapper[5102]: I0123 08:15:03.972366 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f" Jan 23 08:15:04 crc kubenswrapper[5102]: I0123 08:15:04.305645 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"] Jan 23 08:15:04 crc kubenswrapper[5102]: I0123 08:15:04.310104 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485890-jcxgm"] Jan 23 08:15:05 crc kubenswrapper[5102]: I0123 08:15:05.598061 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:15:05 crc kubenswrapper[5102]: E0123 08:15:05.598448 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:15:05 crc kubenswrapper[5102]: I0123 08:15:05.607486 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d1ef1e0b-601f-4faa-bc86-2aaacd0cef11" path="/var/lib/kubelet/pods/d1ef1e0b-601f-4faa-bc86-2aaacd0cef11/volumes" Jan 23 08:15:16 crc kubenswrapper[5102]: I0123 08:15:16.599180 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:15:16 crc kubenswrapper[5102]: I0123 08:15:16.599947 5102 scope.go:117] "RemoveContainer" containerID="f8efcffc78b36bb69db74f192696ab626e8b58c9a847e1d9ad9e4118cab4a7fb" Jan 23 08:15:16 crc kubenswrapper[5102]: E0123 08:15:16.600056 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:15:29 crc kubenswrapper[5102]: I0123 08:15:29.602371 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:15:30 crc kubenswrapper[5102]: I0123 08:15:30.161317 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d"} Jan 23 08:17:46 crc kubenswrapper[5102]: I0123 08:17:46.769982 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:17:46 crc kubenswrapper[5102]: I0123 08:17:46.770605 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:18:16 crc kubenswrapper[5102]: I0123 08:18:16.768706 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:18:16 crc kubenswrapper[5102]: I0123 08:18:16.769128 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:18:46 crc kubenswrapper[5102]: I0123 08:18:46.769219 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:18:46 crc kubenswrapper[5102]: I0123 08:18:46.769892 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:18:46 crc kubenswrapper[5102]: I0123 08:18:46.769957 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:18:46 crc kubenswrapper[5102]: I0123 08:18:46.770990 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d"} 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:18:46 crc kubenswrapper[5102]: I0123 08:18:46.771127 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d" gracePeriod=600 Jan 23 08:18:47 crc kubenswrapper[5102]: I0123 08:18:47.822785 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d" exitCode=0 Jan 23 08:18:47 crc kubenswrapper[5102]: I0123 08:18:47.822889 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d"} Jan 23 08:18:47 crc kubenswrapper[5102]: I0123 08:18:47.823521 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"} Jan 23 08:18:47 crc kubenswrapper[5102]: I0123 08:18:47.823576 5102 scope.go:117] "RemoveContainer" containerID="558743fe9cc9cffd6a253b24b56b07529169df488996e452315c72924bbc0a2a" Jan 23 08:21:16 crc kubenswrapper[5102]: I0123 08:21:16.769735 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:21:16 crc kubenswrapper[5102]: I0123 08:21:16.770325 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:21:44 crc kubenswrapper[5102]: I0123 08:21:44.876095 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:21:44 crc kubenswrapper[5102]: E0123 08:21:44.877908 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f3576b6-9001-41bf-986c-2389cc0ac574" containerName="collect-profiles" Jan 23 08:21:44 crc kubenswrapper[5102]: I0123 08:21:44.877935 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f3576b6-9001-41bf-986c-2389cc0ac574" containerName="collect-profiles" Jan 23 08:21:44 crc kubenswrapper[5102]: I0123 08:21:44.878231 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f3576b6-9001-41bf-986c-2389cc0ac574" containerName="collect-profiles" Jan 23 08:21:44 crc kubenswrapper[5102]: I0123 08:21:44.880189 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:44 crc kubenswrapper[5102]: I0123 08:21:44.890593 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.032493 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.032595 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.032631 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p9b9k\" (UniqueName: \"kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.134319 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.134409 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.134442 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p9b9k\" (UniqueName: \"kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.135114 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.135119 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.163497 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-p9b9k\" (UniqueName: \"kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k\") pod \"redhat-operators-r6wf5\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.218319 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:45 crc kubenswrapper[5102]: I0123 08:21:45.461298 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.215429 5102 generic.go:334] "Generic (PLEG): container finished" podID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerID="fc97949e9d381d84122fe90bf42f5a3ee7fdb323ffcab1269b4dfbd2fde42366" exitCode=0 Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.215509 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerDied","Data":"fc97949e9d381d84122fe90bf42f5a3ee7fdb323ffcab1269b4dfbd2fde42366"} Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.215752 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerStarted","Data":"4fb83537db9124fe171d9ee90251ca28b04391d11544d1a35b8ea26f35b3934f"} Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.217271 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.768663 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:21:46 crc kubenswrapper[5102]: I0123 08:21:46.768987 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:21:47 crc kubenswrapper[5102]: I0123 08:21:47.226962 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerStarted","Data":"294ce1b4815b6794cd277e0859dffdd84c98c5b5ea7849d005223c44530b0f07"} Jan 23 08:21:48 crc kubenswrapper[5102]: I0123 08:21:48.239114 5102 generic.go:334] "Generic (PLEG): container finished" podID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerID="294ce1b4815b6794cd277e0859dffdd84c98c5b5ea7849d005223c44530b0f07" exitCode=0 Jan 23 08:21:48 crc kubenswrapper[5102]: I0123 08:21:48.239214 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerDied","Data":"294ce1b4815b6794cd277e0859dffdd84c98c5b5ea7849d005223c44530b0f07"} Jan 23 08:21:49 crc kubenswrapper[5102]: I0123 08:21:49.248751 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" 
event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerStarted","Data":"fc8232ac392f9ef073ff390ab3b67bc6078f7ecab7a5800f6e31496a24220ee5"} Jan 23 08:21:49 crc kubenswrapper[5102]: I0123 08:21:49.273702 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-r6wf5" podStartSLOduration=2.815682997 podStartE2EDuration="5.273678593s" podCreationTimestamp="2026-01-23 08:21:44 +0000 UTC" firstStartedPulling="2026-01-23 08:21:46.217039388 +0000 UTC m=+5257.037388363" lastFinishedPulling="2026-01-23 08:21:48.675034984 +0000 UTC m=+5259.495383959" observedRunningTime="2026-01-23 08:21:49.271826385 +0000 UTC m=+5260.092175400" watchObservedRunningTime="2026-01-23 08:21:49.273678593 +0000 UTC m=+5260.094027588" Jan 23 08:21:55 crc kubenswrapper[5102]: I0123 08:21:55.219615 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:55 crc kubenswrapper[5102]: I0123 08:21:55.221975 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:55 crc kubenswrapper[5102]: I0123 08:21:55.280087 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:55 crc kubenswrapper[5102]: I0123 08:21:55.345458 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:21:58 crc kubenswrapper[5102]: I0123 08:21:58.058937 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:21:58 crc kubenswrapper[5102]: I0123 08:21:58.329403 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-r6wf5" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="registry-server" containerID="cri-o://fc8232ac392f9ef073ff390ab3b67bc6078f7ecab7a5800f6e31496a24220ee5" gracePeriod=2 Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.344330 5102 generic.go:334] "Generic (PLEG): container finished" podID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerID="fc8232ac392f9ef073ff390ab3b67bc6078f7ecab7a5800f6e31496a24220ee5" exitCode=0 Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.344397 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerDied","Data":"fc8232ac392f9ef073ff390ab3b67bc6078f7ecab7a5800f6e31496a24220ee5"} Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.557788 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.580445 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p9b9k\" (UniqueName: \"kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k\") pod \"70237cd8-ccfd-4784-bd92-f47ad3573f11\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.580903 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content\") pod \"70237cd8-ccfd-4784-bd92-f47ad3573f11\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.581039 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities\") pod \"70237cd8-ccfd-4784-bd92-f47ad3573f11\" (UID: \"70237cd8-ccfd-4784-bd92-f47ad3573f11\") " Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.581996 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities" (OuterVolumeSpecName: "utilities") pod "70237cd8-ccfd-4784-bd92-f47ad3573f11" (UID: "70237cd8-ccfd-4784-bd92-f47ad3573f11"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.589053 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k" (OuterVolumeSpecName: "kube-api-access-p9b9k") pod "70237cd8-ccfd-4784-bd92-f47ad3573f11" (UID: "70237cd8-ccfd-4784-bd92-f47ad3573f11"). InnerVolumeSpecName "kube-api-access-p9b9k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.684012 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.684040 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p9b9k\" (UniqueName: \"kubernetes.io/projected/70237cd8-ccfd-4784-bd92-f47ad3573f11-kube-api-access-p9b9k\") on node \"crc\" DevicePath \"\"" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.702389 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "70237cd8-ccfd-4784-bd92-f47ad3573f11" (UID: "70237cd8-ccfd-4784-bd92-f47ad3573f11"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:22:00 crc kubenswrapper[5102]: I0123 08:22:00.785325 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/70237cd8-ccfd-4784-bd92-f47ad3573f11-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.357090 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-r6wf5" event={"ID":"70237cd8-ccfd-4784-bd92-f47ad3573f11","Type":"ContainerDied","Data":"4fb83537db9124fe171d9ee90251ca28b04391d11544d1a35b8ea26f35b3934f"} Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.357141 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-r6wf5" Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.357194 5102 scope.go:117] "RemoveContainer" containerID="fc8232ac392f9ef073ff390ab3b67bc6078f7ecab7a5800f6e31496a24220ee5" Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.397266 5102 scope.go:117] "RemoveContainer" containerID="294ce1b4815b6794cd277e0859dffdd84c98c5b5ea7849d005223c44530b0f07" Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.398588 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.403173 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-r6wf5"] Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.420072 5102 scope.go:117] "RemoveContainer" containerID="fc97949e9d381d84122fe90bf42f5a3ee7fdb323ffcab1269b4dfbd2fde42366" Jan 23 08:22:01 crc kubenswrapper[5102]: I0123 08:22:01.608269 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" path="/var/lib/kubelet/pods/70237cd8-ccfd-4784-bd92-f47ad3573f11/volumes" Jan 23 08:22:16 crc kubenswrapper[5102]: I0123 08:22:16.768884 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:22:16 crc kubenswrapper[5102]: I0123 08:22:16.769414 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:22:16 crc kubenswrapper[5102]: I0123 08:22:16.769472 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:22:16 crc kubenswrapper[5102]: I0123 08:22:16.770094 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:22:16 crc kubenswrapper[5102]: I0123 08:22:16.770160 5102 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" gracePeriod=600 Jan 23 08:22:16 crc kubenswrapper[5102]: E0123 08:22:16.900933 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:22:17 crc kubenswrapper[5102]: I0123 08:22:17.515576 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" exitCode=0 Jan 23 08:22:17 crc kubenswrapper[5102]: I0123 08:22:17.515651 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"} Jan 23 08:22:17 crc kubenswrapper[5102]: I0123 08:22:17.515742 5102 scope.go:117] "RemoveContainer" containerID="f68bc6252ad25daba53d90ab7c8741f64aab8d025f45ccae9ed3b953a7e53c1d" Jan 23 08:22:17 crc kubenswrapper[5102]: I0123 08:22:17.516331 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:22:17 crc kubenswrapper[5102]: E0123 08:22:17.516669 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:22:28 crc kubenswrapper[5102]: I0123 08:22:28.598142 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:22:28 crc kubenswrapper[5102]: E0123 08:22:28.598842 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:22:43 crc kubenswrapper[5102]: I0123 08:22:43.598498 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:22:43 crc kubenswrapper[5102]: E0123 08:22:43.599793 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:22:58 crc 
kubenswrapper[5102]: I0123 08:22:58.597698 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:22:58 crc kubenswrapper[5102]: E0123 08:22:58.598201 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:23:09 crc kubenswrapper[5102]: I0123 08:23:09.605922 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:23:09 crc kubenswrapper[5102]: E0123 08:23:09.606675 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:23:23 crc kubenswrapper[5102]: I0123 08:23:23.598718 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:23:23 crc kubenswrapper[5102]: E0123 08:23:23.599414 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:23:36 crc kubenswrapper[5102]: I0123 08:23:36.599253 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:23:36 crc kubenswrapper[5102]: E0123 08:23:36.601113 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:23:49 crc kubenswrapper[5102]: I0123 08:23:49.602999 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:23:49 crc kubenswrapper[5102]: E0123 08:23:49.603786 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:24:00 crc kubenswrapper[5102]: I0123 08:24:00.597880 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:24:00 crc 
Jan 23 08:24:12 crc kubenswrapper[5102]: I0123 08:24:12.598234 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:24:12 crc kubenswrapper[5102]: E0123 08:24:12.599144 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:24:27 crc kubenswrapper[5102]: I0123 08:24:27.598730 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:24:27 crc kubenswrapper[5102]: E0123 08:24:27.599559 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:24:39 crc kubenswrapper[5102]: I0123 08:24:39.598084 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:24:39 crc kubenswrapper[5102]: E0123 08:24:39.598971 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:24:50 crc kubenswrapper[5102]: I0123 08:24:50.598359 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:24:50 crc kubenswrapper[5102]: E0123 08:24:50.599153 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:25:05 crc kubenswrapper[5102]: I0123 08:25:05.598722 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:25:05 crc kubenswrapper[5102]: E0123 08:25:05.600069 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:25:20 crc kubenswrapper[5102]: I0123 08:25:20.598710 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:25:20 crc kubenswrapper[5102]: E0123 08:25:20.600421 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:25:31 crc kubenswrapper[5102]: I0123 08:25:31.598517 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f"
Jan 23 08:25:31 crc kubenswrapper[5102]: E0123 08:25:31.599325 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.186395 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"]
Jan 23 08:25:34 crc kubenswrapper[5102]: E0123 08:25:34.192494 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="extract-content"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.192569 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="extract-content"
Jan 23 08:25:34 crc kubenswrapper[5102]: E0123 08:25:34.192606 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="extract-utilities"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.192616 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="extract-utilities"
Jan 23 08:25:34 crc kubenswrapper[5102]: E0123 08:25:34.192627 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="registry-server"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.192636 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="registry-server"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.192862 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="70237cd8-ccfd-4784-bd92-f47ad3573f11" containerName="registry-server"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.223224 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"]
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.223520 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.351576 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.352125 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8dgtd\" (UniqueName: \"kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.352209 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.453380 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8dgtd\" (UniqueName: \"kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.453492 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.453534 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.454313 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.454331 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.481058 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8dgtd\" (UniqueName: \"kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd\") pod \"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z"
\"certified-operators-7xp8z\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:34 crc kubenswrapper[5102]: I0123 08:25:34.558303 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:35 crc kubenswrapper[5102]: I0123 08:25:35.030998 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"] Jan 23 08:25:35 crc kubenswrapper[5102]: I0123 08:25:35.062937 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerStarted","Data":"70c4b416475f9988ab7bdb5a69eb9224d7b6f2b2f77183a172bd6702c3e04141"} Jan 23 08:25:36 crc kubenswrapper[5102]: I0123 08:25:36.072397 5102 generic.go:334] "Generic (PLEG): container finished" podID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerID="6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b" exitCode=0 Jan 23 08:25:36 crc kubenswrapper[5102]: I0123 08:25:36.072453 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerDied","Data":"6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b"} Jan 23 08:25:37 crc kubenswrapper[5102]: I0123 08:25:37.082408 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerStarted","Data":"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0"} Jan 23 08:25:38 crc kubenswrapper[5102]: I0123 08:25:38.097640 5102 generic.go:334] "Generic (PLEG): container finished" podID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerID="4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0" exitCode=0 Jan 23 08:25:38 crc kubenswrapper[5102]: I0123 08:25:38.097751 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerDied","Data":"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0"} Jan 23 08:25:39 crc kubenswrapper[5102]: I0123 08:25:39.109932 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerStarted","Data":"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94"} Jan 23 08:25:39 crc kubenswrapper[5102]: I0123 08:25:39.137405 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-7xp8z" podStartSLOduration=2.591856874 podStartE2EDuration="5.137376639s" podCreationTimestamp="2026-01-23 08:25:34 +0000 UTC" firstStartedPulling="2026-01-23 08:25:36.075161191 +0000 UTC m=+5486.895510176" lastFinishedPulling="2026-01-23 08:25:38.620680976 +0000 UTC m=+5489.441029941" observedRunningTime="2026-01-23 08:25:39.130487215 +0000 UTC m=+5489.950836270" watchObservedRunningTime="2026-01-23 08:25:39.137376639 +0000 UTC m=+5489.957725654" Jan 23 08:25:44 crc kubenswrapper[5102]: I0123 08:25:44.558702 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:44 crc kubenswrapper[5102]: I0123 08:25:44.559305 5102 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:44 crc kubenswrapper[5102]: I0123 08:25:44.637973 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:45 crc kubenswrapper[5102]: I0123 08:25:45.394266 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:45 crc kubenswrapper[5102]: I0123 08:25:45.441372 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"] Jan 23 08:25:46 crc kubenswrapper[5102]: I0123 08:25:46.598657 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:25:46 crc kubenswrapper[5102]: E0123 08:25:46.598868 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:25:47 crc kubenswrapper[5102]: I0123 08:25:47.344189 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-7xp8z" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="registry-server" containerID="cri-o://51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94" gracePeriod=2 Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.246089 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.353514 5102 generic.go:334] "Generic (PLEG): container finished" podID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerID="51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94" exitCode=0 Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.353588 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerDied","Data":"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94"} Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.353617 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-7xp8z" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.353644 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-7xp8z" event={"ID":"a435e804-35c5-4e3b-a850-857d4e14de5a","Type":"ContainerDied","Data":"70c4b416475f9988ab7bdb5a69eb9224d7b6f2b2f77183a172bd6702c3e04141"} Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.353669 5102 scope.go:117] "RemoveContainer" containerID="51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.374170 5102 scope.go:117] "RemoveContainer" containerID="4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.377487 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content\") pod \"a435e804-35c5-4e3b-a850-857d4e14de5a\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.383734 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8dgtd\" (UniqueName: \"kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd\") pod \"a435e804-35c5-4e3b-a850-857d4e14de5a\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.383782 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities\") pod \"a435e804-35c5-4e3b-a850-857d4e14de5a\" (UID: \"a435e804-35c5-4e3b-a850-857d4e14de5a\") " Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.386387 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities" (OuterVolumeSpecName: "utilities") pod "a435e804-35c5-4e3b-a850-857d4e14de5a" (UID: "a435e804-35c5-4e3b-a850-857d4e14de5a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.390835 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd" (OuterVolumeSpecName: "kube-api-access-8dgtd") pod "a435e804-35c5-4e3b-a850-857d4e14de5a" (UID: "a435e804-35c5-4e3b-a850-857d4e14de5a"). InnerVolumeSpecName "kube-api-access-8dgtd". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.399233 5102 scope.go:117] "RemoveContainer" containerID="6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.431139 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a435e804-35c5-4e3b-a850-857d4e14de5a" (UID: "a435e804-35c5-4e3b-a850-857d4e14de5a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.438162 5102 scope.go:117] "RemoveContainer" containerID="51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94" Jan 23 08:25:48 crc kubenswrapper[5102]: E0123 08:25:48.438744 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94\": container with ID starting with 51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94 not found: ID does not exist" containerID="51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.438803 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94"} err="failed to get container status \"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94\": rpc error: code = NotFound desc = could not find container \"51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94\": container with ID starting with 51f9e708dbca87eae1a12f97da8f870f4671a4f276ad6f5e414f6f6b4ff31e94 not found: ID does not exist" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.438851 5102 scope.go:117] "RemoveContainer" containerID="4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0" Jan 23 08:25:48 crc kubenswrapper[5102]: E0123 08:25:48.439348 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0\": container with ID starting with 4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0 not found: ID does not exist" containerID="4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.439385 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0"} err="failed to get container status \"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0\": rpc error: code = NotFound desc = could not find container \"4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0\": container with ID starting with 4776ecdc0ed455584686d6f00129d49ab9a8c68a499ee7345cd5f47b9feaefe0 not found: ID does not exist" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.439399 5102 scope.go:117] "RemoveContainer" containerID="6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b" Jan 23 08:25:48 crc kubenswrapper[5102]: E0123 08:25:48.439866 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b\": container with ID starting with 6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b not found: ID does not exist" containerID="6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.439887 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b"} err="failed to get container status \"6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b\": rpc error: code = NotFound desc = could not 
find container \"6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b\": container with ID starting with 6d4f6e990c8f1a2f3d16e996c1f56102ad038fb08ecbbd8b60585b1d58d8750b not found: ID does not exist" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.485730 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8dgtd\" (UniqueName: \"kubernetes.io/projected/a435e804-35c5-4e3b-a850-857d4e14de5a-kube-api-access-8dgtd\") on node \"crc\" DevicePath \"\"" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.485763 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.485775 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a435e804-35c5-4e3b-a850-857d4e14de5a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.687431 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"] Jan 23 08:25:48 crc kubenswrapper[5102]: I0123 08:25:48.692583 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-7xp8z"] Jan 23 08:25:49 crc kubenswrapper[5102]: I0123 08:25:49.615405 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" path="/var/lib/kubelet/pods/a435e804-35c5-4e3b-a850-857d4e14de5a/volumes" Jan 23 08:26:01 crc kubenswrapper[5102]: I0123 08:26:01.600694 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:26:01 crc kubenswrapper[5102]: E0123 08:26:01.601998 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:26:13 crc kubenswrapper[5102]: I0123 08:26:13.598532 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:26:13 crc kubenswrapper[5102]: E0123 08:26:13.599821 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:26:24 crc kubenswrapper[5102]: I0123 08:26:24.598343 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:26:24 crc kubenswrapper[5102]: E0123 08:26:24.601246 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.744000 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:27 crc kubenswrapper[5102]: E0123 08:26:27.746693 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="registry-server" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.746739 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="registry-server" Jan 23 08:26:27 crc kubenswrapper[5102]: E0123 08:26:27.746804 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="extract-utilities" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.746816 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="extract-utilities" Jan 23 08:26:27 crc kubenswrapper[5102]: E0123 08:26:27.746841 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="extract-content" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.746852 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="extract-content" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.747156 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a435e804-35c5-4e3b-a850-857d4e14de5a" containerName="registry-server" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.748521 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.753158 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.806515 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.806644 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.806680 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-q4ddq\" (UniqueName: \"kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.907737 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.907826 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.907868 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-q4ddq\" (UniqueName: \"kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.908344 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.908415 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:27 crc kubenswrapper[5102]: I0123 08:26:27.929531 5102 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-q4ddq\" (UniqueName: \"kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq\") pod \"redhat-marketplace-vhtl6\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.087603 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.292206 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.303274 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.308051 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.351117 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.413747 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.413816 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qvq2z\" (UniqueName: \"kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.413870 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.514914 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.514969 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qvq2z\" (UniqueName: \"kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.514999 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content\") pod \"community-operators-rsj4b\" (UID: 
\"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.515624 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.515636 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.536352 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qvq2z\" (UniqueName: \"kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z\") pod \"community-operators-rsj4b\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.636896 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.700644 5102 generic.go:334] "Generic (PLEG): container finished" podID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerID="0f5b08f0b8b7033c386b6eb35cc1dd7cca72f86b6e61579a8419bb01073dae53" exitCode=0 Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.700724 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerDied","Data":"0f5b08f0b8b7033c386b6eb35cc1dd7cca72f86b6e61579a8419bb01073dae53"} Jan 23 08:26:28 crc kubenswrapper[5102]: I0123 08:26:28.700798 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerStarted","Data":"de2d4418880c9ddf6b793825109a67c4653813d41717fbfffb1438f77d3a307f"} Jan 23 08:26:29 crc kubenswrapper[5102]: I0123 08:26:29.137124 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:29 crc kubenswrapper[5102]: I0123 08:26:29.709573 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerStarted","Data":"24fe7b4dab2b72274cd0f821b8ec84a39f5b0881b38d78e7221e7150a0d16147"} Jan 23 08:26:29 crc kubenswrapper[5102]: I0123 08:26:29.711836 5102 generic.go:334] "Generic (PLEG): container finished" podID="5f70e23c-8646-4520-8e87-ee968792249a" containerID="4dc232b904fab566053e9dbde5b65287d78eaf27492fbd03689901c8f2b5e523" exitCode=0 Jan 23 08:26:29 crc kubenswrapper[5102]: I0123 08:26:29.711886 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerDied","Data":"4dc232b904fab566053e9dbde5b65287d78eaf27492fbd03689901c8f2b5e523"} Jan 23 08:26:29 crc kubenswrapper[5102]: I0123 08:26:29.711914 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerStarted","Data":"56abbd251e137242e349f750b5321b9550d420e79ef8a846f1dde1964e4525e0"} Jan 23 08:26:30 crc kubenswrapper[5102]: I0123 08:26:30.722501 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerStarted","Data":"7fbfa9f5c25c6ebdc4d99ce0bd902787857b13b6e0c85818b8c020b967dbce99"} Jan 23 08:26:30 crc kubenswrapper[5102]: I0123 08:26:30.726350 5102 generic.go:334] "Generic (PLEG): container finished" podID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerID="24fe7b4dab2b72274cd0f821b8ec84a39f5b0881b38d78e7221e7150a0d16147" exitCode=0 Jan 23 08:26:30 crc kubenswrapper[5102]: I0123 08:26:30.726388 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerDied","Data":"24fe7b4dab2b72274cd0f821b8ec84a39f5b0881b38d78e7221e7150a0d16147"} Jan 23 08:26:31 crc kubenswrapper[5102]: I0123 08:26:31.739153 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerStarted","Data":"2fc1d12bd81d3cb6737487118870f52267329882b55805a923bcd1ce9836568c"} Jan 23 08:26:31 crc kubenswrapper[5102]: I0123 08:26:31.742303 5102 generic.go:334] "Generic (PLEG): container finished" podID="5f70e23c-8646-4520-8e87-ee968792249a" containerID="7fbfa9f5c25c6ebdc4d99ce0bd902787857b13b6e0c85818b8c020b967dbce99" exitCode=0 Jan 23 08:26:31 crc kubenswrapper[5102]: I0123 08:26:31.742347 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerDied","Data":"7fbfa9f5c25c6ebdc4d99ce0bd902787857b13b6e0c85818b8c020b967dbce99"} Jan 23 08:26:31 crc kubenswrapper[5102]: I0123 08:26:31.774099 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-vhtl6" podStartSLOduration=2.210729209 podStartE2EDuration="4.774082757s" podCreationTimestamp="2026-01-23 08:26:27 +0000 UTC" firstStartedPulling="2026-01-23 08:26:28.702215621 +0000 UTC m=+5539.522564596" lastFinishedPulling="2026-01-23 08:26:31.265569169 +0000 UTC m=+5542.085918144" observedRunningTime="2026-01-23 08:26:31.772263121 +0000 UTC m=+5542.592612136" watchObservedRunningTime="2026-01-23 08:26:31.774082757 +0000 UTC m=+5542.594431732" Jan 23 08:26:32 crc kubenswrapper[5102]: I0123 08:26:32.752396 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerStarted","Data":"6d1df41464fb6c981fd43aa840813d9eee11b442f82c5a056c2d2508e8ef6f14"} Jan 23 08:26:32 crc kubenswrapper[5102]: I0123 08:26:32.775526 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rsj4b" podStartSLOduration=2.210012708 podStartE2EDuration="4.775500751s" podCreationTimestamp="2026-01-23 08:26:28 +0000 UTC" firstStartedPulling="2026-01-23 08:26:29.714166142 +0000 UTC m=+5540.534515117" lastFinishedPulling="2026-01-23 08:26:32.279654185 +0000 UTC m=+5543.100003160" observedRunningTime="2026-01-23 08:26:32.774704448 +0000 UTC m=+5543.595053433" watchObservedRunningTime="2026-01-23 
08:26:32.775500751 +0000 UTC m=+5543.595849736" Jan 23 08:26:35 crc kubenswrapper[5102]: I0123 08:26:35.598451 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:26:35 crc kubenswrapper[5102]: E0123 08:26:35.599820 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.088449 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.093238 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.140214 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.637178 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.637237 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.711586 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.842615 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:38 crc kubenswrapper[5102]: I0123 08:26:38.847358 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:39 crc kubenswrapper[5102]: I0123 08:26:39.683460 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:40 crc kubenswrapper[5102]: I0123 08:26:40.812784 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-vhtl6" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="registry-server" containerID="cri-o://2fc1d12bd81d3cb6737487118870f52267329882b55805a923bcd1ce9836568c" gracePeriod=2 Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.481410 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.482023 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rsj4b" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="registry-server" containerID="cri-o://6d1df41464fb6c981fd43aa840813d9eee11b442f82c5a056c2d2508e8ef6f14" gracePeriod=2 Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.823025 5102 generic.go:334] "Generic (PLEG): container finished" podID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" 
containerID="2fc1d12bd81d3cb6737487118870f52267329882b55805a923bcd1ce9836568c" exitCode=0 Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.823103 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerDied","Data":"2fc1d12bd81d3cb6737487118870f52267329882b55805a923bcd1ce9836568c"} Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.826126 5102 generic.go:334] "Generic (PLEG): container finished" podID="5f70e23c-8646-4520-8e87-ee968792249a" containerID="6d1df41464fb6c981fd43aa840813d9eee11b442f82c5a056c2d2508e8ef6f14" exitCode=0 Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.826181 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerDied","Data":"6d1df41464fb6c981fd43aa840813d9eee11b442f82c5a056c2d2508e8ef6f14"} Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.826208 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rsj4b" event={"ID":"5f70e23c-8646-4520-8e87-ee968792249a","Type":"ContainerDied","Data":"56abbd251e137242e349f750b5321b9550d420e79ef8a846f1dde1964e4525e0"} Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.826227 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="56abbd251e137242e349f750b5321b9550d420e79ef8a846f1dde1964e4525e0" Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.863998 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.919695 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities\") pod \"5f70e23c-8646-4520-8e87-ee968792249a\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.919897 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qvq2z\" (UniqueName: \"kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z\") pod \"5f70e23c-8646-4520-8e87-ee968792249a\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.919982 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content\") pod \"5f70e23c-8646-4520-8e87-ee968792249a\" (UID: \"5f70e23c-8646-4520-8e87-ee968792249a\") " Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.920736 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities" (OuterVolumeSpecName: "utilities") pod "5f70e23c-8646-4520-8e87-ee968792249a" (UID: "5f70e23c-8646-4520-8e87-ee968792249a"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.925842 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z" (OuterVolumeSpecName: "kube-api-access-qvq2z") pod "5f70e23c-8646-4520-8e87-ee968792249a" (UID: "5f70e23c-8646-4520-8e87-ee968792249a"). InnerVolumeSpecName "kube-api-access-qvq2z". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:26:41 crc kubenswrapper[5102]: I0123 08:26:41.995998 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5f70e23c-8646-4520-8e87-ee968792249a" (UID: "5f70e23c-8646-4520-8e87-ee968792249a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.022069 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qvq2z\" (UniqueName: \"kubernetes.io/projected/5f70e23c-8646-4520-8e87-ee968792249a-kube-api-access-qvq2z\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.022120 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.022132 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5f70e23c-8646-4520-8e87-ee968792249a-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.335987 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.427335 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-q4ddq\" (UniqueName: \"kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq\") pod \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.427415 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities\") pod \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.427555 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content\") pod \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\" (UID: \"5aad6378-dcd8-4ccf-bfd9-d914620f7bda\") " Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.428262 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities" (OuterVolumeSpecName: "utilities") pod "5aad6378-dcd8-4ccf-bfd9-d914620f7bda" (UID: "5aad6378-dcd8-4ccf-bfd9-d914620f7bda"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.432813 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq" (OuterVolumeSpecName: "kube-api-access-q4ddq") pod "5aad6378-dcd8-4ccf-bfd9-d914620f7bda" (UID: "5aad6378-dcd8-4ccf-bfd9-d914620f7bda"). InnerVolumeSpecName "kube-api-access-q4ddq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.455580 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5aad6378-dcd8-4ccf-bfd9-d914620f7bda" (UID: "5aad6378-dcd8-4ccf-bfd9-d914620f7bda"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.529777 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-q4ddq\" (UniqueName: \"kubernetes.io/projected/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-kube-api-access-q4ddq\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.529826 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.529842 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5aad6378-dcd8-4ccf-bfd9-d914620f7bda-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.836147 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rsj4b" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.836149 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-vhtl6" event={"ID":"5aad6378-dcd8-4ccf-bfd9-d914620f7bda","Type":"ContainerDied","Data":"de2d4418880c9ddf6b793825109a67c4653813d41717fbfffb1438f77d3a307f"} Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.836208 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-vhtl6" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.836239 5102 scope.go:117] "RemoveContainer" containerID="2fc1d12bd81d3cb6737487118870f52267329882b55805a923bcd1ce9836568c" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.858502 5102 scope.go:117] "RemoveContainer" containerID="24fe7b4dab2b72274cd0f821b8ec84a39f5b0881b38d78e7221e7150a0d16147" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.870553 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.881498 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rsj4b"] Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.899401 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.901010 5102 scope.go:117] "RemoveContainer" containerID="0f5b08f0b8b7033c386b6eb35cc1dd7cca72f86b6e61579a8419bb01073dae53" Jan 23 08:26:42 crc kubenswrapper[5102]: I0123 08:26:42.908066 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-vhtl6"] Jan 23 08:26:44 crc kubenswrapper[5102]: I0123 08:26:44.052921 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" path="/var/lib/kubelet/pods/5aad6378-dcd8-4ccf-bfd9-d914620f7bda/volumes" Jan 23 08:26:44 crc kubenswrapper[5102]: I0123 08:26:44.058988 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f70e23c-8646-4520-8e87-ee968792249a" path="/var/lib/kubelet/pods/5f70e23c-8646-4520-8e87-ee968792249a/volumes" Jan 23 08:26:50 crc kubenswrapper[5102]: I0123 08:26:50.598173 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:26:50 crc kubenswrapper[5102]: E0123 08:26:50.599111 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:27:04 crc kubenswrapper[5102]: I0123 08:27:04.598210 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:27:04 crc kubenswrapper[5102]: E0123 08:27:04.599313 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:27:17 crc kubenswrapper[5102]: I0123 08:27:17.598320 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:27:18 crc kubenswrapper[5102]: I0123 08:27:18.338238 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363"} Jan 23 08:29:46 crc kubenswrapper[5102]: I0123 08:29:46.768920 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:29:46 crc kubenswrapper[5102]: I0123 08:29:46.769577 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.159062 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt"] Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160143 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="extract-utilities" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160162 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="extract-utilities" Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160184 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160196 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160217 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160228 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160244 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="extract-utilities" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160254 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="extract-utilities" Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160268 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="extract-content" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160280 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="extract-content" Jan 23 08:30:00 crc kubenswrapper[5102]: E0123 08:30:00.160309 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="extract-content" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160319 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="extract-content" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160527 5102 
memory_manager.go:354] "RemoveStaleState removing state" podUID="5aad6378-dcd8-4ccf-bfd9-d914620f7bda" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.160581 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="5f70e23c-8646-4520-8e87-ee968792249a" containerName="registry-server" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.161279 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.163977 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.164600 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.231003 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt"] Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.277265 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.277347 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.277411 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5kxwr\" (UniqueName: \"kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.379237 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.379283 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.379338 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5kxwr\" (UniqueName: 
\"kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.380446 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.389030 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.395993 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5kxwr\" (UniqueName: \"kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr\") pod \"collect-profiles-29485950-zgdvt\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.481756 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:00 crc kubenswrapper[5102]: I0123 08:30:00.920764 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt"] Jan 23 08:30:00 crc kubenswrapper[5102]: W0123 08:30:00.927212 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0f83b99a_e856_40f7_a283_34567ff41f3a.slice/crio-d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809 WatchSource:0}: Error finding container d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809: Status 404 returned error can't find the container with id d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809 Jan 23 08:30:01 crc kubenswrapper[5102]: I0123 08:30:01.655295 5102 generic.go:334] "Generic (PLEG): container finished" podID="0f83b99a-e856-40f7-a283-34567ff41f3a" containerID="701788de32001527bdea0aae171bd87191cf19cee4eb7adf8440d1227e9e9843" exitCode=0 Jan 23 08:30:01 crc kubenswrapper[5102]: I0123 08:30:01.655329 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" event={"ID":"0f83b99a-e856-40f7-a283-34567ff41f3a","Type":"ContainerDied","Data":"701788de32001527bdea0aae171bd87191cf19cee4eb7adf8440d1227e9e9843"} Jan 23 08:30:01 crc kubenswrapper[5102]: I0123 08:30:01.655583 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" event={"ID":"0f83b99a-e856-40f7-a283-34567ff41f3a","Type":"ContainerStarted","Data":"d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809"} Jan 23 08:30:02 crc kubenswrapper[5102]: I0123 08:30:02.979505 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.119997 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume\") pod \"0f83b99a-e856-40f7-a283-34567ff41f3a\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.120065 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume\") pod \"0f83b99a-e856-40f7-a283-34567ff41f3a\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.120265 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5kxwr\" (UniqueName: \"kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr\") pod \"0f83b99a-e856-40f7-a283-34567ff41f3a\" (UID: \"0f83b99a-e856-40f7-a283-34567ff41f3a\") " Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.120985 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume" (OuterVolumeSpecName: "config-volume") pod "0f83b99a-e856-40f7-a283-34567ff41f3a" (UID: "0f83b99a-e856-40f7-a283-34567ff41f3a"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.127454 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr" (OuterVolumeSpecName: "kube-api-access-5kxwr") pod "0f83b99a-e856-40f7-a283-34567ff41f3a" (UID: "0f83b99a-e856-40f7-a283-34567ff41f3a"). InnerVolumeSpecName "kube-api-access-5kxwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.131784 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0f83b99a-e856-40f7-a283-34567ff41f3a" (UID: "0f83b99a-e856-40f7-a283-34567ff41f3a"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.222066 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0f83b99a-e856-40f7-a283-34567ff41f3a-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.222100 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0f83b99a-e856-40f7-a283-34567ff41f3a-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.222111 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5kxwr\" (UniqueName: \"kubernetes.io/projected/0f83b99a-e856-40f7-a283-34567ff41f3a-kube-api-access-5kxwr\") on node \"crc\" DevicePath \"\"" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.669308 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" event={"ID":"0f83b99a-e856-40f7-a283-34567ff41f3a","Type":"ContainerDied","Data":"d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809"} Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.669352 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt" Jan 23 08:30:03 crc kubenswrapper[5102]: I0123 08:30:03.669362 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d84a7ea801b2d1003847df2d748843838ccd487e16c2f1860a5500b7c3be0809" Jan 23 08:30:04 crc kubenswrapper[5102]: I0123 08:30:04.053369 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8"] Jan 23 08:30:04 crc kubenswrapper[5102]: I0123 08:30:04.058042 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485905-l9vx8"] Jan 23 08:30:05 crc kubenswrapper[5102]: I0123 08:30:05.607457 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ead9d1be-80cc-4e9e-b413-9f9095d0483b" path="/var/lib/kubelet/pods/ead9d1be-80cc-4e9e-b413-9f9095d0483b/volumes" Jan 23 08:30:16 crc kubenswrapper[5102]: I0123 08:30:16.769129 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:30:16 crc kubenswrapper[5102]: I0123 08:30:16.769989 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:30:16 crc kubenswrapper[5102]: I0123 08:30:16.971442 5102 scope.go:117] "RemoveContainer" containerID="5435ec258553850903e880653a680a72480e70c428f7b284f43cf4d6794eaf86" Jan 23 08:30:46 crc kubenswrapper[5102]: I0123 08:30:46.768509 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 23 08:30:46 crc kubenswrapper[5102]: I0123 08:30:46.769203 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:30:46 crc kubenswrapper[5102]: I0123 08:30:46.769256 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:30:46 crc kubenswrapper[5102]: I0123 08:30:46.770006 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:30:46 crc kubenswrapper[5102]: I0123 08:30:46.770070 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363" gracePeriod=600 Jan 23 08:30:47 crc kubenswrapper[5102]: I0123 08:30:47.007897 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363" exitCode=0 Jan 23 08:30:47 crc kubenswrapper[5102]: I0123 08:30:47.007971 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363"} Jan 23 08:30:47 crc kubenswrapper[5102]: I0123 08:30:47.008210 5102 scope.go:117] "RemoveContainer" containerID="27a7049b63f4d4012a887159b7033e16b354fa4301b7964e8031bcaae2df954f" Jan 23 08:30:48 crc kubenswrapper[5102]: I0123 08:30:48.018534 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83"} Jan 23 08:33:16 crc kubenswrapper[5102]: I0123 08:33:16.768929 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:33:16 crc kubenswrapper[5102]: I0123 08:33:16.769502 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:33:17 crc kubenswrapper[5102]: I0123 08:33:17.053791 5102 scope.go:117] "RemoveContainer" containerID="6d1df41464fb6c981fd43aa840813d9eee11b442f82c5a056c2d2508e8ef6f14" Jan 23 08:33:17 crc kubenswrapper[5102]: I0123 08:33:17.088679 5102 
scope.go:117] "RemoveContainer" containerID="4dc232b904fab566053e9dbde5b65287d78eaf27492fbd03689901c8f2b5e523" Jan 23 08:33:17 crc kubenswrapper[5102]: I0123 08:33:17.122159 5102 scope.go:117] "RemoveContainer" containerID="7fbfa9f5c25c6ebdc4d99ce0bd902787857b13b6e0c85818b8c020b967dbce99" Jan 23 08:33:46 crc kubenswrapper[5102]: I0123 08:33:46.769126 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:33:46 crc kubenswrapper[5102]: I0123 08:33:46.770769 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:34:16 crc kubenswrapper[5102]: I0123 08:34:16.767888 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:34:16 crc kubenswrapper[5102]: I0123 08:34:16.768455 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:34:16 crc kubenswrapper[5102]: I0123 08:34:16.768516 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:34:16 crc kubenswrapper[5102]: I0123 08:34:16.769213 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:34:16 crc kubenswrapper[5102]: I0123 08:34:16.769273 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" gracePeriod=600 Jan 23 08:34:17 crc kubenswrapper[5102]: E0123 08:34:17.399016 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:34:17 crc kubenswrapper[5102]: I0123 08:34:17.839706 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" 
containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" exitCode=0 Jan 23 08:34:17 crc kubenswrapper[5102]: I0123 08:34:17.839774 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83"} Jan 23 08:34:17 crc kubenswrapper[5102]: I0123 08:34:17.840434 5102 scope.go:117] "RemoveContainer" containerID="3616ade85a3347c04a389689d526f32d4482290b33bf98f7160195bdd3544363" Jan 23 08:34:17 crc kubenswrapper[5102]: I0123 08:34:17.841066 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:34:17 crc kubenswrapper[5102]: E0123 08:34:17.841400 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:34:32 crc kubenswrapper[5102]: I0123 08:34:32.597869 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:34:32 crc kubenswrapper[5102]: E0123 08:34:32.600502 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:34:45 crc kubenswrapper[5102]: I0123 08:34:45.598220 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:34:45 crc kubenswrapper[5102]: E0123 08:34:45.599026 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:34:58 crc kubenswrapper[5102]: I0123 08:34:58.597758 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:34:58 crc kubenswrapper[5102]: E0123 08:34:58.598604 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:35:10 crc kubenswrapper[5102]: I0123 08:35:10.598349 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:35:10 crc kubenswrapper[5102]: E0123 08:35:10.599246 5102 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:35:23 crc kubenswrapper[5102]: I0123 08:35:23.598727 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:35:23 crc kubenswrapper[5102]: E0123 08:35:23.599442 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:35:35 crc kubenswrapper[5102]: I0123 08:35:35.598099 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:35:35 crc kubenswrapper[5102]: E0123 08:35:35.598879 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:35:48 crc kubenswrapper[5102]: I0123 08:35:48.598702 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:35:48 crc kubenswrapper[5102]: E0123 08:35:48.599825 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:35:58 crc kubenswrapper[5102]: I0123 08:35:58.975459 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:35:58 crc kubenswrapper[5102]: E0123 08:35:58.976402 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0f83b99a-e856-40f7-a283-34567ff41f3a" containerName="collect-profiles" Jan 23 08:35:58 crc kubenswrapper[5102]: I0123 08:35:58.976421 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0f83b99a-e856-40f7-a283-34567ff41f3a" containerName="collect-profiles" Jan 23 08:35:58 crc kubenswrapper[5102]: I0123 08:35:58.976590 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0f83b99a-e856-40f7-a283-34567ff41f3a" containerName="collect-profiles" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.000324 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.015986 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.139982 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.140358 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.140720 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4428k\" (UniqueName: \"kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.242166 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.242239 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.242310 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4428k\" (UniqueName: \"kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.242880 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.242880 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.262663 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-4428k\" (UniqueName: \"kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k\") pod \"certified-operators-52bmt\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.336560 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:35:59 crc kubenswrapper[5102]: I0123 08:35:59.646901 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:36:00 crc kubenswrapper[5102]: I0123 08:36:00.089190 5102 generic.go:334] "Generic (PLEG): container finished" podID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerID="531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c" exitCode=0 Jan 23 08:36:00 crc kubenswrapper[5102]: I0123 08:36:00.089245 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerDied","Data":"531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c"} Jan 23 08:36:00 crc kubenswrapper[5102]: I0123 08:36:00.089278 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerStarted","Data":"403dbd6c2b6b7144ab060f2f0449d05e6bcadd17f05b2bea453c4b71471608bc"} Jan 23 08:36:00 crc kubenswrapper[5102]: I0123 08:36:00.091449 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 08:36:02 crc kubenswrapper[5102]: I0123 08:36:02.106812 5102 generic.go:334] "Generic (PLEG): container finished" podID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerID="a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178" exitCode=0 Jan 23 08:36:02 crc kubenswrapper[5102]: I0123 08:36:02.106914 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerDied","Data":"a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178"} Jan 23 08:36:03 crc kubenswrapper[5102]: I0123 08:36:03.115993 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerStarted","Data":"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61"} Jan 23 08:36:03 crc kubenswrapper[5102]: I0123 08:36:03.135184 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-52bmt" podStartSLOduration=2.625776553 podStartE2EDuration="5.13516456s" podCreationTimestamp="2026-01-23 08:35:58 +0000 UTC" firstStartedPulling="2026-01-23 08:36:00.091137186 +0000 UTC m=+6110.911486161" lastFinishedPulling="2026-01-23 08:36:02.600525193 +0000 UTC m=+6113.420874168" observedRunningTime="2026-01-23 08:36:03.131472896 +0000 UTC m=+6113.951821881" watchObservedRunningTime="2026-01-23 08:36:03.13516456 +0000 UTC m=+6113.955513525" Jan 23 08:36:03 crc kubenswrapper[5102]: I0123 08:36:03.598360 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:36:03 crc kubenswrapper[5102]: E0123 08:36:03.598629 5102 pod_workers.go:1301] "Error syncing 
pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:36:09 crc kubenswrapper[5102]: I0123 08:36:09.337507 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:09 crc kubenswrapper[5102]: I0123 08:36:09.338009 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:09 crc kubenswrapper[5102]: I0123 08:36:09.378014 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:10 crc kubenswrapper[5102]: I0123 08:36:10.243610 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:10 crc kubenswrapper[5102]: I0123 08:36:10.348395 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:36:12 crc kubenswrapper[5102]: I0123 08:36:12.201915 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-52bmt" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="registry-server" containerID="cri-o://3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61" gracePeriod=2 Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.679455 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.854005 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content\") pod \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.855637 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities\") pod \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.856606 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4428k\" (UniqueName: \"kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k\") pod \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\" (UID: \"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139\") " Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.859410 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities" (OuterVolumeSpecName: "utilities") pod "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" (UID: "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.864560 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k" (OuterVolumeSpecName: "kube-api-access-4428k") pod "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" (UID: "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139"). InnerVolumeSpecName "kube-api-access-4428k". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.908675 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" (UID: "e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.957874 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4428k\" (UniqueName: \"kubernetes.io/projected/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-kube-api-access-4428k\") on node \"crc\" DevicePath \"\"" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.957923 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:36:13 crc kubenswrapper[5102]: I0123 08:36:13.957936 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.222315 5102 generic.go:334] "Generic (PLEG): container finished" podID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerID="3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61" exitCode=0 Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.222377 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerDied","Data":"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61"} Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.222414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-52bmt" event={"ID":"e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139","Type":"ContainerDied","Data":"403dbd6c2b6b7144ab060f2f0449d05e6bcadd17f05b2bea453c4b71471608bc"} Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.222435 5102 scope.go:117] "RemoveContainer" containerID="3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.223156 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-52bmt" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.252465 5102 scope.go:117] "RemoveContainer" containerID="a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.271043 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.276142 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-52bmt"] Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.308789 5102 scope.go:117] "RemoveContainer" containerID="531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.329070 5102 scope.go:117] "RemoveContainer" containerID="3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61" Jan 23 08:36:14 crc kubenswrapper[5102]: E0123 08:36:14.329568 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61\": container with ID starting with 3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61 not found: ID does not exist" containerID="3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.329612 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61"} err="failed to get container status \"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61\": rpc error: code = NotFound desc = could not find container \"3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61\": container with ID starting with 3fe78c0f99d56de6e7e25ae4b40bbbd786dfd504198d3064626ac7909f74ff61 not found: ID does not exist" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.329642 5102 scope.go:117] "RemoveContainer" containerID="a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178" Jan 23 08:36:14 crc kubenswrapper[5102]: E0123 08:36:14.330333 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178\": container with ID starting with a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178 not found: ID does not exist" containerID="a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.330414 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178"} err="failed to get container status \"a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178\": rpc error: code = NotFound desc = could not find container \"a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178\": container with ID starting with a8fcb3a14ece35e704279667335a356539967173b3138946e73ec95ad1914178 not found: ID does not exist" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.330455 5102 scope.go:117] "RemoveContainer" containerID="531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c" Jan 23 08:36:14 crc kubenswrapper[5102]: E0123 08:36:14.331026 5102 log.go:32] "ContainerStatus from runtime service 
failed" err="rpc error: code = NotFound desc = could not find container \"531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c\": container with ID starting with 531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c not found: ID does not exist" containerID="531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c" Jan 23 08:36:14 crc kubenswrapper[5102]: I0123 08:36:14.331055 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c"} err="failed to get container status \"531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c\": rpc error: code = NotFound desc = could not find container \"531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c\": container with ID starting with 531b4e2e447992a026bff4d6b0daccaaaf6267685d626f41d082c53528ab4e9c not found: ID does not exist" Jan 23 08:36:15 crc kubenswrapper[5102]: I0123 08:36:15.609881 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" path="/var/lib/kubelet/pods/e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139/volumes" Jan 23 08:36:18 crc kubenswrapper[5102]: I0123 08:36:18.598375 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:36:18 crc kubenswrapper[5102]: E0123 08:36:18.598814 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:36:32 crc kubenswrapper[5102]: I0123 08:36:32.598149 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:36:32 crc kubenswrapper[5102]: E0123 08:36:32.599106 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:36:47 crc kubenswrapper[5102]: I0123 08:36:47.598456 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:36:47 crc kubenswrapper[5102]: E0123 08:36:47.600621 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:37:02 crc kubenswrapper[5102]: I0123 08:37:02.599170 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:37:02 crc kubenswrapper[5102]: E0123 08:37:02.599803 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for 
\"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:37:15 crc kubenswrapper[5102]: I0123 08:37:15.600634 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:37:15 crc kubenswrapper[5102]: E0123 08:37:15.601455 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.650377 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:23 crc kubenswrapper[5102]: E0123 08:37:23.651333 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="extract-content" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.651349 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="extract-content" Jan 23 08:37:23 crc kubenswrapper[5102]: E0123 08:37:23.651374 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="extract-utilities" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.651384 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="extract-utilities" Jan 23 08:37:23 crc kubenswrapper[5102]: E0123 08:37:23.651423 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="registry-server" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.651434 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="registry-server" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.651631 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="e5fe60b2-4f57-43e6-bcf0-e6aaf66ac139" containerName="registry-server" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.653064 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.659555 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.659629 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zlrlq\" (UniqueName: \"kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.659757 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.664920 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.760761 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.760839 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.760878 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zlrlq\" (UniqueName: \"kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.761394 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.761448 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.785339 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zlrlq\" (UniqueName: \"kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq\") pod \"community-operators-5dd7c\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:23 crc kubenswrapper[5102]: I0123 08:37:23.976952 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:24 crc kubenswrapper[5102]: I0123 08:37:24.310021 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:24 crc kubenswrapper[5102]: I0123 08:37:24.852440 5102 generic.go:334] "Generic (PLEG): container finished" podID="a66dfe94-c657-4218-a083-3efa34590206" containerID="ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062" exitCode=0 Jan 23 08:37:24 crc kubenswrapper[5102]: I0123 08:37:24.852693 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerDied","Data":"ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062"} Jan 23 08:37:24 crc kubenswrapper[5102]: I0123 08:37:24.853105 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerStarted","Data":"47c5e0a3fae0b83626094fe875ff9303722cb7cea3b6d26e6d62e92ad1d4fdf9"} Jan 23 08:37:25 crc kubenswrapper[5102]: I0123 08:37:25.877346 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerStarted","Data":"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443"} Jan 23 08:37:26 crc kubenswrapper[5102]: I0123 08:37:26.886434 5102 generic.go:334] "Generic (PLEG): container finished" podID="a66dfe94-c657-4218-a083-3efa34590206" containerID="0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443" exitCode=0 Jan 23 08:37:26 crc kubenswrapper[5102]: I0123 08:37:26.886483 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerDied","Data":"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443"} Jan 23 08:37:27 crc kubenswrapper[5102]: I0123 08:37:27.599132 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:37:27 crc kubenswrapper[5102]: E0123 08:37:27.599302 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:37:27 crc kubenswrapper[5102]: I0123 08:37:27.894631 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerStarted","Data":"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe"} Jan 23 08:37:27 crc kubenswrapper[5102]: I0123 08:37:27.916761 5102 
pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5dd7c" podStartSLOduration=2.397940543 podStartE2EDuration="4.916740368s" podCreationTimestamp="2026-01-23 08:37:23 +0000 UTC" firstStartedPulling="2026-01-23 08:37:24.855024324 +0000 UTC m=+6195.675373339" lastFinishedPulling="2026-01-23 08:37:27.373824199 +0000 UTC m=+6198.194173164" observedRunningTime="2026-01-23 08:37:27.910414393 +0000 UTC m=+6198.730763388" watchObservedRunningTime="2026-01-23 08:37:27.916740368 +0000 UTC m=+6198.737089343" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.386952 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.392470 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.395861 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.488306 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.488640 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hk8b9\" (UniqueName: \"kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.488764 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.590026 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.590308 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hk8b9\" (UniqueName: \"kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.590445 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc 
kubenswrapper[5102]: I0123 08:37:31.590653 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.590823 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.625635 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hk8b9\" (UniqueName: \"kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9\") pod \"redhat-marketplace-gwsgn\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.729301 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:31 crc kubenswrapper[5102]: I0123 08:37:31.974826 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:31 crc kubenswrapper[5102]: W0123 08:37:31.983220 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod89808e81_478a_42cf_b609_8849ee2f3184.slice/crio-3be96f00a19a627c8fe21229f41a5005a69fb784ee60c05abb8a83e6dd6af7d2 WatchSource:0}: Error finding container 3be96f00a19a627c8fe21229f41a5005a69fb784ee60c05abb8a83e6dd6af7d2: Status 404 returned error can't find the container with id 3be96f00a19a627c8fe21229f41a5005a69fb784ee60c05abb8a83e6dd6af7d2 Jan 23 08:37:32 crc kubenswrapper[5102]: I0123 08:37:32.946932 5102 generic.go:334] "Generic (PLEG): container finished" podID="89808e81-478a-42cf-b609-8849ee2f3184" containerID="2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134" exitCode=0 Jan 23 08:37:32 crc kubenswrapper[5102]: I0123 08:37:32.947026 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerDied","Data":"2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134"} Jan 23 08:37:32 crc kubenswrapper[5102]: I0123 08:37:32.947117 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerStarted","Data":"3be96f00a19a627c8fe21229f41a5005a69fb784ee60c05abb8a83e6dd6af7d2"} Jan 23 08:37:33 crc kubenswrapper[5102]: I0123 08:37:33.964142 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerStarted","Data":"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47"} Jan 23 08:37:33 crc kubenswrapper[5102]: I0123 08:37:33.977400 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:33 crc kubenswrapper[5102]: I0123 08:37:33.978805 5102 kubelet.go:2542] "SyncLoop 
(probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:34 crc kubenswrapper[5102]: I0123 08:37:34.047093 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:34 crc kubenswrapper[5102]: I0123 08:37:34.983484 5102 generic.go:334] "Generic (PLEG): container finished" podID="89808e81-478a-42cf-b609-8849ee2f3184" containerID="d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47" exitCode=0 Jan 23 08:37:34 crc kubenswrapper[5102]: I0123 08:37:34.983601 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerDied","Data":"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47"} Jan 23 08:37:35 crc kubenswrapper[5102]: I0123 08:37:35.043262 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:35 crc kubenswrapper[5102]: I0123 08:37:35.994295 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerStarted","Data":"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4"} Jan 23 08:37:36 crc kubenswrapper[5102]: I0123 08:37:36.418614 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:37 crc kubenswrapper[5102]: I0123 08:37:37.041301 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-gwsgn" podStartSLOduration=3.328873671 podStartE2EDuration="6.041279844s" podCreationTimestamp="2026-01-23 08:37:31 +0000 UTC" firstStartedPulling="2026-01-23 08:37:32.951984274 +0000 UTC m=+6203.772333289" lastFinishedPulling="2026-01-23 08:37:35.664390457 +0000 UTC m=+6206.484739462" observedRunningTime="2026-01-23 08:37:37.032892274 +0000 UTC m=+6207.853241349" watchObservedRunningTime="2026-01-23 08:37:37.041279844 +0000 UTC m=+6207.861628829" Jan 23 08:37:38 crc kubenswrapper[5102]: I0123 08:37:38.012761 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-5dd7c" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="registry-server" containerID="cri-o://3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe" gracePeriod=2 Jan 23 08:37:38 crc kubenswrapper[5102]: I0123 08:37:38.926850 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.024994 5102 generic.go:334] "Generic (PLEG): container finished" podID="a66dfe94-c657-4218-a083-3efa34590206" containerID="3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe" exitCode=0 Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.025060 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerDied","Data":"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe"} Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.025098 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-5dd7c" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.025118 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5dd7c" event={"ID":"a66dfe94-c657-4218-a083-3efa34590206","Type":"ContainerDied","Data":"47c5e0a3fae0b83626094fe875ff9303722cb7cea3b6d26e6d62e92ad1d4fdf9"} Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.025152 5102 scope.go:117] "RemoveContainer" containerID="3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.051507 5102 scope.go:117] "RemoveContainer" containerID="0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.071342 5102 scope.go:117] "RemoveContainer" containerID="ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.094198 5102 scope.go:117] "RemoveContainer" containerID="3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe" Jan 23 08:37:39 crc kubenswrapper[5102]: E0123 08:37:39.094880 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe\": container with ID starting with 3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe not found: ID does not exist" containerID="3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.094947 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe"} err="failed to get container status \"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe\": rpc error: code = NotFound desc = could not find container \"3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe\": container with ID starting with 3284cc7d9ad733d5d5bd953f550b953838c6c6e0646938046f328872cfdb8cfe not found: ID does not exist" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.094981 5102 scope.go:117] "RemoveContainer" containerID="0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443" Jan 23 08:37:39 crc kubenswrapper[5102]: E0123 08:37:39.095410 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443\": container with ID starting with 0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443 not found: ID does not exist" containerID="0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.095439 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443"} err="failed to get container status \"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443\": rpc error: code = NotFound desc = could not find container \"0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443\": container with ID starting with 0d22f8f497f8a507e968afdbc8a1e201d54d02d023c798cf0ced769737753443 not found: ID does not exist" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.095454 5102 scope.go:117] "RemoveContainer" 
containerID="ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062" Jan 23 08:37:39 crc kubenswrapper[5102]: E0123 08:37:39.096164 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062\": container with ID starting with ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062 not found: ID does not exist" containerID="ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.096194 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062"} err="failed to get container status \"ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062\": rpc error: code = NotFound desc = could not find container \"ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062\": container with ID starting with ae8d10e003b1a4331d2f001a102f98e2e2b542008b9ca328014783d30518f062 not found: ID does not exist" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.106156 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content\") pod \"a66dfe94-c657-4218-a083-3efa34590206\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.106352 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zlrlq\" (UniqueName: \"kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq\") pod \"a66dfe94-c657-4218-a083-3efa34590206\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.106449 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities\") pod \"a66dfe94-c657-4218-a083-3efa34590206\" (UID: \"a66dfe94-c657-4218-a083-3efa34590206\") " Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.107306 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities" (OuterVolumeSpecName: "utilities") pod "a66dfe94-c657-4218-a083-3efa34590206" (UID: "a66dfe94-c657-4218-a083-3efa34590206"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.112183 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq" (OuterVolumeSpecName: "kube-api-access-zlrlq") pod "a66dfe94-c657-4218-a083-3efa34590206" (UID: "a66dfe94-c657-4218-a083-3efa34590206"). InnerVolumeSpecName "kube-api-access-zlrlq". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.155662 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a66dfe94-c657-4218-a083-3efa34590206" (UID: "a66dfe94-c657-4218-a083-3efa34590206"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.208922 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.209016 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a66dfe94-c657-4218-a083-3efa34590206-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.209033 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zlrlq\" (UniqueName: \"kubernetes.io/projected/a66dfe94-c657-4218-a083-3efa34590206-kube-api-access-zlrlq\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.382134 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.389814 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-5dd7c"] Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.604834 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:37:39 crc kubenswrapper[5102]: E0123 08:37:39.605321 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:37:39 crc kubenswrapper[5102]: I0123 08:37:39.616411 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a66dfe94-c657-4218-a083-3efa34590206" path="/var/lib/kubelet/pods/a66dfe94-c657-4218-a083-3efa34590206/volumes" Jan 23 08:37:41 crc kubenswrapper[5102]: I0123 08:37:41.730321 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:41 crc kubenswrapper[5102]: I0123 08:37:41.730734 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:41 crc kubenswrapper[5102]: I0123 08:37:41.805343 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:42 crc kubenswrapper[5102]: I0123 08:37:42.126268 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:42 crc kubenswrapper[5102]: I0123 08:37:42.626894 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:44 crc kubenswrapper[5102]: I0123 08:37:44.066677 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-gwsgn" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="registry-server" containerID="cri-o://94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4" gracePeriod=2 Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.067902 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.083208 5102 generic.go:334] "Generic (PLEG): container finished" podID="89808e81-478a-42cf-b609-8849ee2f3184" containerID="94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4" exitCode=0 Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.083265 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerDied","Data":"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4"} Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.083313 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-gwsgn" event={"ID":"89808e81-478a-42cf-b609-8849ee2f3184","Type":"ContainerDied","Data":"3be96f00a19a627c8fe21229f41a5005a69fb784ee60c05abb8a83e6dd6af7d2"} Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.083352 5102 scope.go:117] "RemoveContainer" containerID="94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.083720 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-gwsgn" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.125265 5102 scope.go:117] "RemoveContainer" containerID="d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.154227 5102 scope.go:117] "RemoveContainer" containerID="2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.181888 5102 scope.go:117] "RemoveContainer" containerID="94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4" Jan 23 08:37:45 crc kubenswrapper[5102]: E0123 08:37:45.182435 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4\": container with ID starting with 94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4 not found: ID does not exist" containerID="94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.182466 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4"} err="failed to get container status \"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4\": rpc error: code = NotFound desc = could not find container \"94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4\": container with ID starting with 94c59006f727793df8ebf6407849c1f188d191f5b638bd669e9ba275d5af23a4 not found: ID does not exist" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.182488 5102 scope.go:117] "RemoveContainer" containerID="d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47" Jan 23 08:37:45 crc kubenswrapper[5102]: E0123 08:37:45.182979 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47\": container with ID starting with d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47 not found: ID does not exist" 
containerID="d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.183007 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47"} err="failed to get container status \"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47\": rpc error: code = NotFound desc = could not find container \"d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47\": container with ID starting with d74da0e7ac5a6ee1be30f31b364cbb78cdb754a9944bf1789a6dd026afe71f47 not found: ID does not exist" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.183024 5102 scope.go:117] "RemoveContainer" containerID="2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134" Jan 23 08:37:45 crc kubenswrapper[5102]: E0123 08:37:45.183342 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134\": container with ID starting with 2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134 not found: ID does not exist" containerID="2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.183385 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134"} err="failed to get container status \"2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134\": rpc error: code = NotFound desc = could not find container \"2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134\": container with ID starting with 2d6353296758be95f131411915dd9a763e6519ecd4aa9a63527a56787b479134 not found: ID does not exist" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.184041 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities\") pod \"89808e81-478a-42cf-b609-8849ee2f3184\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.184285 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content\") pod \"89808e81-478a-42cf-b609-8849ee2f3184\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.184464 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hk8b9\" (UniqueName: \"kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9\") pod \"89808e81-478a-42cf-b609-8849ee2f3184\" (UID: \"89808e81-478a-42cf-b609-8849ee2f3184\") " Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.185499 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities" (OuterVolumeSpecName: "utilities") pod "89808e81-478a-42cf-b609-8849ee2f3184" (UID: "89808e81-478a-42cf-b609-8849ee2f3184"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.189732 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9" (OuterVolumeSpecName: "kube-api-access-hk8b9") pod "89808e81-478a-42cf-b609-8849ee2f3184" (UID: "89808e81-478a-42cf-b609-8849ee2f3184"). InnerVolumeSpecName "kube-api-access-hk8b9". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.206701 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "89808e81-478a-42cf-b609-8849ee2f3184" (UID: "89808e81-478a-42cf-b609-8849ee2f3184"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.286321 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.286644 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/89808e81-478a-42cf-b609-8849ee2f3184-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.286757 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hk8b9\" (UniqueName: \"kubernetes.io/projected/89808e81-478a-42cf-b609-8849ee2f3184-kube-api-access-hk8b9\") on node \"crc\" DevicePath \"\"" Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.443108 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.450884 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-gwsgn"] Jan 23 08:37:45 crc kubenswrapper[5102]: I0123 08:37:45.617072 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="89808e81-478a-42cf-b609-8849ee2f3184" path="/var/lib/kubelet/pods/89808e81-478a-42cf-b609-8849ee2f3184/volumes" Jan 23 08:37:54 crc kubenswrapper[5102]: I0123 08:37:54.599889 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:37:54 crc kubenswrapper[5102]: E0123 08:37:54.600664 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:38:07 crc kubenswrapper[5102]: I0123 08:38:07.598567 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:38:07 crc kubenswrapper[5102]: E0123 08:38:07.599978 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.297365 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298262 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="extract-utilities" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298287 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="extract-utilities" Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298309 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="extract-content" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298322 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="extract-content" Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298362 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298376 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298405 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="extract-utilities" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298419 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="extract-utilities" Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298447 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="extract-content" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298460 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="extract-content" Jan 23 08:38:09 crc kubenswrapper[5102]: E0123 08:38:09.298478 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298493 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298878 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="89808e81-478a-42cf-b609-8849ee2f3184" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.298918 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="a66dfe94-c657-4218-a083-3efa34590206" containerName="registry-server" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.300933 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.322599 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.377596 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.377659 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jgv6s\" (UniqueName: \"kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.377744 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.478345 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.478403 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jgv6s\" (UniqueName: \"kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.478452 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.479014 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.479026 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.500895 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"kube-api-access-jgv6s\" (UniqueName: \"kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s\") pod \"redhat-operators-j2j2z\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:09 crc kubenswrapper[5102]: I0123 08:38:09.635197 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:10 crc kubenswrapper[5102]: I0123 08:38:10.814524 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:11 crc kubenswrapper[5102]: I0123 08:38:11.348067 5102 generic.go:334] "Generic (PLEG): container finished" podID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerID="bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f" exitCode=0 Jan 23 08:38:11 crc kubenswrapper[5102]: I0123 08:38:11.348217 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerDied","Data":"bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f"} Jan 23 08:38:11 crc kubenswrapper[5102]: I0123 08:38:11.348519 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerStarted","Data":"beeba50f68ff42e3ec9892bdf9bd769cd54bd85db580cfa9f9d82dbd10b1a7c8"} Jan 23 08:38:12 crc kubenswrapper[5102]: I0123 08:38:12.363357 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerStarted","Data":"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823"} Jan 23 08:38:13 crc kubenswrapper[5102]: I0123 08:38:13.379600 5102 generic.go:334] "Generic (PLEG): container finished" podID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerID="7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823" exitCode=0 Jan 23 08:38:13 crc kubenswrapper[5102]: I0123 08:38:13.379677 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerDied","Data":"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823"} Jan 23 08:38:14 crc kubenswrapper[5102]: I0123 08:38:14.391327 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerStarted","Data":"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3"} Jan 23 08:38:14 crc kubenswrapper[5102]: I0123 08:38:14.423433 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-j2j2z" podStartSLOduration=2.850481909 podStartE2EDuration="5.42341085s" podCreationTimestamp="2026-01-23 08:38:09 +0000 UTC" firstStartedPulling="2026-01-23 08:38:11.350944103 +0000 UTC m=+6242.171293108" lastFinishedPulling="2026-01-23 08:38:13.923873034 +0000 UTC m=+6244.744222049" observedRunningTime="2026-01-23 08:38:14.416217207 +0000 UTC m=+6245.236566223" watchObservedRunningTime="2026-01-23 08:38:14.42341085 +0000 UTC m=+6245.243759835" Jan 23 08:38:19 crc kubenswrapper[5102]: I0123 08:38:19.636691 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 
23 08:38:19 crc kubenswrapper[5102]: I0123 08:38:19.636782 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:20 crc kubenswrapper[5102]: I0123 08:38:20.692321 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-j2j2z" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="registry-server" probeResult="failure" output=< Jan 23 08:38:20 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 08:38:20 crc kubenswrapper[5102]: > Jan 23 08:38:22 crc kubenswrapper[5102]: I0123 08:38:22.597996 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:38:22 crc kubenswrapper[5102]: E0123 08:38:22.599536 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:38:29 crc kubenswrapper[5102]: I0123 08:38:29.712421 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:29 crc kubenswrapper[5102]: I0123 08:38:29.788120 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:29 crc kubenswrapper[5102]: I0123 08:38:29.963497 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:31 crc kubenswrapper[5102]: I0123 08:38:31.534327 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-j2j2z" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="registry-server" containerID="cri-o://e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3" gracePeriod=2 Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.038699 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.203028 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jgv6s\" (UniqueName: \"kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s\") pod \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.203082 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities\") pod \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.203183 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content\") pod \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\" (UID: \"14e64563-7b04-45e3-9d22-ea42afb0e7a7\") " Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.203857 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities" (OuterVolumeSpecName: "utilities") pod "14e64563-7b04-45e3-9d22-ea42afb0e7a7" (UID: "14e64563-7b04-45e3-9d22-ea42afb0e7a7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.207808 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s" (OuterVolumeSpecName: "kube-api-access-jgv6s") pod "14e64563-7b04-45e3-9d22-ea42afb0e7a7" (UID: "14e64563-7b04-45e3-9d22-ea42afb0e7a7"). InnerVolumeSpecName "kube-api-access-jgv6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.304992 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jgv6s\" (UniqueName: \"kubernetes.io/projected/14e64563-7b04-45e3-9d22-ea42afb0e7a7-kube-api-access-jgv6s\") on node \"crc\" DevicePath \"\"" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.305020 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.342293 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14e64563-7b04-45e3-9d22-ea42afb0e7a7" (UID: "14e64563-7b04-45e3-9d22-ea42afb0e7a7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.406398 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14e64563-7b04-45e3-9d22-ea42afb0e7a7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.543088 5102 generic.go:334] "Generic (PLEG): container finished" podID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerID="e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3" exitCode=0 Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.543157 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-j2j2z" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.543164 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerDied","Data":"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3"} Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.543583 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-j2j2z" event={"ID":"14e64563-7b04-45e3-9d22-ea42afb0e7a7","Type":"ContainerDied","Data":"beeba50f68ff42e3ec9892bdf9bd769cd54bd85db580cfa9f9d82dbd10b1a7c8"} Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.543601 5102 scope.go:117] "RemoveContainer" containerID="e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.564873 5102 scope.go:117] "RemoveContainer" containerID="7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.577120 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.591566 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-j2j2z"] Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.605351 5102 scope.go:117] "RemoveContainer" containerID="bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.624168 5102 scope.go:117] "RemoveContainer" containerID="e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3" Jan 23 08:38:32 crc kubenswrapper[5102]: E0123 08:38:32.624968 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3\": container with ID starting with e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3 not found: ID does not exist" containerID="e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.625019 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3"} err="failed to get container status \"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3\": rpc error: code = NotFound desc = could not find container \"e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3\": container with ID starting with e768023aa8e3521a70755dce0ee1e58ec5853c56cfe819b2c559cb4ad15919d3 not found: ID does not exist" Jan 23 08:38:32 crc 
kubenswrapper[5102]: I0123 08:38:32.625065 5102 scope.go:117] "RemoveContainer" containerID="7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823" Jan 23 08:38:32 crc kubenswrapper[5102]: E0123 08:38:32.625287 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823\": container with ID starting with 7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823 not found: ID does not exist" containerID="7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.625313 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823"} err="failed to get container status \"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823\": rpc error: code = NotFound desc = could not find container \"7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823\": container with ID starting with 7d834e0c342b2026a5c45a02156a1e5b77114a75e8b49a0c5a775ec2f0b5e823 not found: ID does not exist" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.625326 5102 scope.go:117] "RemoveContainer" containerID="bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f" Jan 23 08:38:32 crc kubenswrapper[5102]: E0123 08:38:32.625511 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f\": container with ID starting with bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f not found: ID does not exist" containerID="bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f" Jan 23 08:38:32 crc kubenswrapper[5102]: I0123 08:38:32.625533 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f"} err="failed to get container status \"bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f\": rpc error: code = NotFound desc = could not find container \"bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f\": container with ID starting with bb3459be7e5daa74b44c1e77effc4fe12477a3143501c7ac0e490fc6a6f6f24f not found: ID does not exist" Jan 23 08:38:33 crc kubenswrapper[5102]: I0123 08:38:33.607371 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" path="/var/lib/kubelet/pods/14e64563-7b04-45e3-9d22-ea42afb0e7a7/volumes" Jan 23 08:38:34 crc kubenswrapper[5102]: I0123 08:38:34.598500 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:38:34 crc kubenswrapper[5102]: E0123 08:38:34.599103 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:38:49 crc kubenswrapper[5102]: I0123 08:38:49.605793 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" 
Jan 23 08:38:49 crc kubenswrapper[5102]: E0123 08:38:49.606882 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:39:02 crc kubenswrapper[5102]: I0123 08:39:02.598688 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:39:02 crc kubenswrapper[5102]: E0123 08:39:02.599848 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:39:14 crc kubenswrapper[5102]: I0123 08:39:14.600434 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:39:14 crc kubenswrapper[5102]: E0123 08:39:14.603661 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:39:25 crc kubenswrapper[5102]: I0123 08:39:25.599039 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:39:26 crc kubenswrapper[5102]: I0123 08:39:26.046849 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e"} Jan 23 08:41:46 crc kubenswrapper[5102]: I0123 08:41:46.768931 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:41:46 crc kubenswrapper[5102]: I0123 08:41:46.769610 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:42:16 crc kubenswrapper[5102]: I0123 08:42:16.769392 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:42:16 crc kubenswrapper[5102]: I0123 08:42:16.770074 5102 prober.go:107] "Probe failed" 
probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:42:46 crc kubenswrapper[5102]: I0123 08:42:46.768714 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:42:46 crc kubenswrapper[5102]: I0123 08:42:46.769436 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:42:46 crc kubenswrapper[5102]: I0123 08:42:46.769503 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:42:46 crc kubenswrapper[5102]: I0123 08:42:46.770586 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:42:46 crc kubenswrapper[5102]: I0123 08:42:46.770693 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e" gracePeriod=600 Jan 23 08:42:47 crc kubenswrapper[5102]: I0123 08:42:47.079595 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e" exitCode=0 Jan 23 08:42:47 crc kubenswrapper[5102]: I0123 08:42:47.079653 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e"} Jan 23 08:42:47 crc kubenswrapper[5102]: I0123 08:42:47.079708 5102 scope.go:117] "RemoveContainer" containerID="8e9f1fee4b202801ff98ed5b084ebc27f6cd328a7503aa300efbda9acaaf3f83" Jan 23 08:42:48 crc kubenswrapper[5102]: I0123 08:42:48.093583 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"} Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.171817 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw"] Jan 23 08:45:00 crc kubenswrapper[5102]: E0123 08:45:00.172776 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" 
containerName="registry-server" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.172792 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="registry-server" Jan 23 08:45:00 crc kubenswrapper[5102]: E0123 08:45:00.172835 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="extract-utilities" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.172846 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="extract-utilities" Jan 23 08:45:00 crc kubenswrapper[5102]: E0123 08:45:00.172862 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="extract-content" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.172872 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="extract-content" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.173063 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="14e64563-7b04-45e3-9d22-ea42afb0e7a7" containerName="registry-server" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.173695 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.177046 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.178912 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.196066 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw"] Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.288413 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.288591 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.288785 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v7sfg\" (UniqueName: \"kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.389754 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: 
\"kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.389872 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v7sfg\" (UniqueName: \"kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.389957 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.391052 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.402140 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.427088 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v7sfg\" (UniqueName: \"kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg\") pod \"collect-profiles-29485965-ftqnw\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:00 crc kubenswrapper[5102]: I0123 08:45:00.497444 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:01 crc kubenswrapper[5102]: I0123 08:45:01.041590 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw"] Jan 23 08:45:01 crc kubenswrapper[5102]: W0123 08:45:01.049020 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd8412fda_207a_441c_bbf3_e39c66fd4fd4.slice/crio-1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b WatchSource:0}: Error finding container 1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b: Status 404 returned error can't find the container with id 1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b Jan 23 08:45:01 crc kubenswrapper[5102]: I0123 08:45:01.321394 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" event={"ID":"d8412fda-207a-441c-bbf3-e39c66fd4fd4","Type":"ContainerStarted","Data":"a6e338a2177ff5257942f1d5fa8723117be5b7fbcb589a19161b022f6fa22c4a"} Jan 23 08:45:01 crc kubenswrapper[5102]: I0123 08:45:01.321715 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" event={"ID":"d8412fda-207a-441c-bbf3-e39c66fd4fd4","Type":"ContainerStarted","Data":"1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b"} Jan 23 08:45:01 crc kubenswrapper[5102]: I0123 08:45:01.341223 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" podStartSLOduration=1.341199192 podStartE2EDuration="1.341199192s" podCreationTimestamp="2026-01-23 08:45:00 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2026-01-23 08:45:01.334139884 +0000 UTC m=+6652.154488869" watchObservedRunningTime="2026-01-23 08:45:01.341199192 +0000 UTC m=+6652.161548167" Jan 23 08:45:02 crc kubenswrapper[5102]: I0123 08:45:02.334616 5102 generic.go:334] "Generic (PLEG): container finished" podID="d8412fda-207a-441c-bbf3-e39c66fd4fd4" containerID="a6e338a2177ff5257942f1d5fa8723117be5b7fbcb589a19161b022f6fa22c4a" exitCode=0 Jan 23 08:45:02 crc kubenswrapper[5102]: I0123 08:45:02.334679 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" event={"ID":"d8412fda-207a-441c-bbf3-e39c66fd4fd4","Type":"ContainerDied","Data":"a6e338a2177ff5257942f1d5fa8723117be5b7fbcb589a19161b022f6fa22c4a"} Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.661470 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.749230 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume\") pod \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.749326 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume\") pod \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.749367 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v7sfg\" (UniqueName: \"kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg\") pod \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\" (UID: \"d8412fda-207a-441c-bbf3-e39c66fd4fd4\") " Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.750073 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume" (OuterVolumeSpecName: "config-volume") pod "d8412fda-207a-441c-bbf3-e39c66fd4fd4" (UID: "d8412fda-207a-441c-bbf3-e39c66fd4fd4"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.755638 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "d8412fda-207a-441c-bbf3-e39c66fd4fd4" (UID: "d8412fda-207a-441c-bbf3-e39c66fd4fd4"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.756870 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg" (OuterVolumeSpecName: "kube-api-access-v7sfg") pod "d8412fda-207a-441c-bbf3-e39c66fd4fd4" (UID: "d8412fda-207a-441c-bbf3-e39c66fd4fd4"). InnerVolumeSpecName "kube-api-access-v7sfg". 
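
[editor's note] The MountVolume/UnmountVolume pairs above are the kubelet's volume manager reconciling desired state (what the pod spec declares) against actual state (what is mounted on the node). A minimal sketch of that loop follows; the types and names (volume, state, reconcile) are invented for illustration and are not the kubelet's real reconciler.

    package main

    import "fmt"

    type volume struct{ uniqueName, podUID string }

    type state map[volume]bool

    // reconcile mounts volumes a pod declares but the node lacks, and
    // unmounts volumes the node still holds for pods that no longer
    // declare them, mirroring the MountVolume/UnmountVolume records.
    func reconcile(desired, actual state) {
        for v := range desired {
            if !actual[v] {
                fmt.Printf("operationExecutor.MountVolume started for volume %q\n", v.uniqueName)
                actual[v] = true // MountVolume.SetUp succeeded
            }
        }
        for v := range actual {
            if !desired[v] {
                fmt.Printf("operationExecutor.UnmountVolume started for volume %q\n", v.uniqueName)
                delete(actual, v) // UnmountVolume.TearDown succeeded, then "Volume detached"
            }
        }
    }

    func main() {
        pod := "d8412fda-207a-441c-bbf3-e39c66fd4fd4"
        desired := state{} // pod deleted: nothing is desired any more
        actual := state{{"kubernetes.io/configmap/" + pod + "-config-volume", pod}: true}
        reconcile(desired, actual) // config-volume gets torn down, as in the log
    }
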
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.851234 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/d8412fda-207a-441c-bbf3-e39c66fd4fd4-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.851268 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/d8412fda-207a-441c-bbf3-e39c66fd4fd4-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 08:45:03 crc kubenswrapper[5102]: I0123 08:45:03.851277 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v7sfg\" (UniqueName: \"kubernetes.io/projected/d8412fda-207a-441c-bbf3-e39c66fd4fd4-kube-api-access-v7sfg\") on node \"crc\" DevicePath \"\"" Jan 23 08:45:04 crc kubenswrapper[5102]: I0123 08:45:04.357846 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" event={"ID":"d8412fda-207a-441c-bbf3-e39c66fd4fd4","Type":"ContainerDied","Data":"1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b"} Jan 23 08:45:04 crc kubenswrapper[5102]: I0123 08:45:04.358260 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="1c349fc10ae5c49620a79b2c97a3c587a7df1a16e1cec23551813ac388f1e65b" Jan 23 08:45:04 crc kubenswrapper[5102]: I0123 08:45:04.357913 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485965-ftqnw" Jan 23 08:45:04 crc kubenswrapper[5102]: I0123 08:45:04.447915 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"] Jan 23 08:45:04 crc kubenswrapper[5102]: I0123 08:45:04.457630 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485920-h8pqh"] Jan 23 08:45:05 crc kubenswrapper[5102]: I0123 08:45:05.615635 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b3c6feb8-053e-492c-ad70-7c44e58ea9a2" path="/var/lib/kubelet/pods/b3c6feb8-053e-492c-ad70-7c44e58ea9a2/volumes" Jan 23 08:45:16 crc kubenswrapper[5102]: I0123 08:45:16.768720 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:45:16 crc kubenswrapper[5102]: I0123 08:45:16.769921 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:45:17 crc kubenswrapper[5102]: I0123 08:45:17.476818 5102 scope.go:117] "RemoveContainer" containerID="fb7b9b321b94ac5fcbc87c4f68ec6a189414a6af89a76fe981a0a3fef6eb1f70" Jan 23 08:45:46 crc kubenswrapper[5102]: I0123 08:45:46.768858 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" 
start-of-body= Jan 23 08:45:46 crc kubenswrapper[5102]: I0123 08:45:46.769889 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.768586 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.769313 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.769360 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.769979 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.770036 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" gracePeriod=600 Jan 23 08:46:16 crc kubenswrapper[5102]: E0123 08:46:16.919571 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.974618 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" exitCode=0 Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.974655 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"} Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.974715 5102 scope.go:117] "RemoveContainer" containerID="07abb82526a403ac6592321441673da2e6c5d00f4a93751c015f741089d1a74e" Jan 23 08:46:16 crc kubenswrapper[5102]: I0123 08:46:16.975232 5102 scope.go:117] "RemoveContainer" 
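
[editor's note] The repeating "back-off 5m0s restarting failed container" errors from here on are the kubelet's crash-loop back-off at its cap. A minimal sketch of that policy, assuming the kubelet's usual defaults (10s base, doubled per restart, capped at 5m); treat the exact constants as an assumption, the shape of the curve is the point.

    package main

    import (
        "fmt"
        "time"
    )

    // backoff returns the wait before restart attempt n of a crashing
    // container: 10s doubled each time, pinned at the 5m cap.
    func backoff(restarts int) time.Duration {
        d := 10 * time.Second
        for i := 0; i < restarts; i++ {
            d *= 2
            if d >= 5*time.Minute {
                return 5 * time.Minute // the "back-off 5m0s" seen in the log
            }
        }
        return d
    }

    func main() {
        for r := 0; r <= 6; r++ {
            fmt.Printf("restart %d -> wait %s\n", r, backoff(r))
        }
        // From restart 5 onward the delay stays at 5m0s; each SyncLoop pass
        // in the meantime logs "Error syncing pod, skipping ... CrashLoopBackOff".
    }
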
containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:46:16 crc kubenswrapper[5102]: E0123 08:46:16.975481 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:46:32 crc kubenswrapper[5102]: I0123 08:46:32.598009 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:46:32 crc kubenswrapper[5102]: E0123 08:46:32.599010 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:46:43 crc kubenswrapper[5102]: I0123 08:46:43.598879 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:46:43 crc kubenswrapper[5102]: E0123 08:46:43.599954 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.759946 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-56p8k"] Jan 23 08:46:45 crc kubenswrapper[5102]: E0123 08:46:45.760541 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d8412fda-207a-441c-bbf3-e39c66fd4fd4" containerName="collect-profiles" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.760554 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="d8412fda-207a-441c-bbf3-e39c66fd4fd4" containerName="collect-profiles" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.760737 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="d8412fda-207a-441c-bbf3-e39c66fd4fd4" containerName="collect-profiles" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.761659 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.774186 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-56p8k"] Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.928074 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.928127 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:45 crc kubenswrapper[5102]: I0123 08:46:45.928191 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p8cz8\" (UniqueName: \"kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.030012 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.030269 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.030324 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p8cz8\" (UniqueName: \"kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.030495 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.030762 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.051744 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-p8cz8\" (UniqueName: \"kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8\") pod \"certified-operators-56p8k\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") " pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.136002 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-56p8k" Jan 23 08:46:46 crc kubenswrapper[5102]: I0123 08:46:46.411563 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-56p8k"] Jan 23 08:46:47 crc kubenswrapper[5102]: I0123 08:46:47.268014 5102 generic.go:334] "Generic (PLEG): container finished" podID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerID="c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe" exitCode=0 Jan 23 08:46:47 crc kubenswrapper[5102]: I0123 08:46:47.268077 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerDied","Data":"c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe"} Jan 23 08:46:47 crc kubenswrapper[5102]: I0123 08:46:47.268116 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerStarted","Data":"f818435791a45b11bbdca7240bc6afba5655b4b41618ce50d9693836a973ff7d"} Jan 23 08:46:47 crc kubenswrapper[5102]: I0123 08:46:47.273259 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 08:46:48 crc kubenswrapper[5102]: I0123 08:46:48.278271 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerStarted","Data":"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"} Jan 23 08:46:49 crc kubenswrapper[5102]: I0123 08:46:49.288454 5102 generic.go:334] "Generic (PLEG): container finished" podID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerID="3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce" exitCode=0 Jan 23 08:46:49 crc kubenswrapper[5102]: I0123 08:46:49.288530 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerDied","Data":"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"} Jan 23 08:46:50 crc kubenswrapper[5102]: I0123 08:46:50.297032 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerStarted","Data":"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"} Jan 23 08:46:50 crc kubenswrapper[5102]: I0123 08:46:50.315276 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-56p8k" podStartSLOduration=2.906741189 podStartE2EDuration="5.315259076s" podCreationTimestamp="2026-01-23 08:46:45 +0000 UTC" firstStartedPulling="2026-01-23 08:46:47.272759667 +0000 UTC m=+6758.093108682" lastFinishedPulling="2026-01-23 08:46:49.681277544 +0000 UTC m=+6760.501626569" observedRunningTime="2026-01-23 08:46:50.314227854 +0000 UTC m=+6761.134576839" watchObservedRunningTime="2026-01-23 
Jan 23 08:46:56 crc kubenswrapper[5102]: I0123 08:46:56.136146 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:56 crc kubenswrapper[5102]: I0123 08:46:56.136717 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:56 crc kubenswrapper[5102]: I0123 08:46:56.181372 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:56 crc kubenswrapper[5102]: I0123 08:46:56.391377 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:56 crc kubenswrapper[5102]: I0123 08:46:56.451842 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-56p8k"]
Jan 23 08:46:57 crc kubenswrapper[5102]: I0123 08:46:57.598511 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"
Jan 23 08:46:57 crc kubenswrapper[5102]: E0123 08:46:57.599073 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.361069 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-56p8k" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="registry-server" containerID="cri-o://097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099" gracePeriod=2
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.822665 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.952079 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities\") pod \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") "
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.952125 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content\") pod \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") "
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.952219 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-p8cz8\" (UniqueName: \"kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8\") pod \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\" (UID: \"adbf38c2-5d9e-41e1-b045-e3849e65c9a4\") "
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.953173 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities" (OuterVolumeSpecName: "utilities") pod "adbf38c2-5d9e-41e1-b045-e3849e65c9a4" (UID: "adbf38c2-5d9e-41e1-b045-e3849e65c9a4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.958107 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8" (OuterVolumeSpecName: "kube-api-access-p8cz8") pod "adbf38c2-5d9e-41e1-b045-e3849e65c9a4" (UID: "adbf38c2-5d9e-41e1-b045-e3849e65c9a4"). InnerVolumeSpecName "kube-api-access-p8cz8". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 08:46:58 crc kubenswrapper[5102]: I0123 08:46:58.998278 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "adbf38c2-5d9e-41e1-b045-e3849e65c9a4" (UID: "adbf38c2-5d9e-41e1-b045-e3849e65c9a4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.053985 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-p8cz8\" (UniqueName: \"kubernetes.io/projected/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-kube-api-access-p8cz8\") on node \"crc\" DevicePath \"\""
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.054036 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.054048 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/adbf38c2-5d9e-41e1-b045-e3849e65c9a4-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.374063 5102 generic.go:334] "Generic (PLEG): container finished" podID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerID="097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099" exitCode=0
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.374137 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-56p8k"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.374144 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerDied","Data":"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"}
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.374751 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-56p8k" event={"ID":"adbf38c2-5d9e-41e1-b045-e3849e65c9a4","Type":"ContainerDied","Data":"f818435791a45b11bbdca7240bc6afba5655b4b41618ce50d9693836a973ff7d"}
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.374779 5102 scope.go:117] "RemoveContainer" containerID="097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.404073 5102 scope.go:117] "RemoveContainer" containerID="3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.428217 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-56p8k"]
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.433210 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-56p8k"]
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.436079 5102 scope.go:117] "RemoveContainer" containerID="c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.458077 5102 scope.go:117] "RemoveContainer" containerID="097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"
Jan 23 08:46:59 crc kubenswrapper[5102]: E0123 08:46:59.458681 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099\": container with ID starting with 097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099 not found: ID does not exist" containerID="097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.458752 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099"} err="failed to get container status \"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099\": rpc error: code = NotFound desc = could not find container \"097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099\": container with ID starting with 097ac6ee6ee06749dd43221730639544bc1fc1e654b85816b1582a9304d30099 not found: ID does not exist"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.458794 5102 scope.go:117] "RemoveContainer" containerID="3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"
Jan 23 08:46:59 crc kubenswrapper[5102]: E0123 08:46:59.459179 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce\": container with ID starting with 3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce not found: ID does not exist" containerID="3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.459221 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce"} err="failed to get container status \"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce\": rpc error: code = NotFound desc = could not find container \"3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce\": container with ID starting with 3a97672c46da3804e5ad6ba1047a20a32c7f104ea1ce950a9089a6c7e221b2ce not found: ID does not exist"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.459245 5102 scope.go:117] "RemoveContainer" containerID="c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe"
Jan 23 08:46:59 crc kubenswrapper[5102]: E0123 08:46:59.459707 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe\": container with ID starting with c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe not found: ID does not exist" containerID="c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.459757 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe"} err="failed to get container status \"c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe\": rpc error: code = NotFound desc = could not find container \"c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe\": container with ID starting with c5002330e84ef91d31c7f3db0be0b9619ebd91d25984433b383dd197cd59f4fe not found: ID does not exist"
Jan 23 08:46:59 crc kubenswrapper[5102]: I0123 08:46:59.610484 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" path="/var/lib/kubelet/pods/adbf38c2-5d9e-41e1-b045-e3849e65c9a4/volumes"
Jan 23 08:47:12 crc kubenswrapper[5102]: I0123 08:47:12.597955 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"
Jan 23 08:47:12 crc kubenswrapper[5102]: E0123 08:47:12.598826 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
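
[editor's note] The three "ContainerStatus from runtime service failed ... NotFound" / "DeleteContainer returned error" pairs above are logged at error level but do not block teardown: removing a container that is already gone is treated as success. A self-contained sketch of that idempotent pattern (the map and the error here stand in for the CRI runtime; they are not the kubelet's real types):

    package main

    import (
        "errors"
        "fmt"
    )

    // errNotFound stands in for CRI's "rpc error: code = NotFound".
    var errNotFound = errors.New("NotFound: container does not exist")

    // deleteContainer logs, but swallows, NotFound: the desired end state
    // ("container absent") already holds, so teardown keeps going.
    func deleteContainer(id string, alive map[string]bool) error {
        if !alive[id] {
            fmt.Printf("DeleteContainer returned error for %s: %v\n", id, errNotFound)
            return nil
        }
        delete(alive, id)
        return nil
    }

    func main() {
        alive := map[string]bool{} // the runtime already pruned everything
        for _, id := range []string{"097ac6ee6ee0", "3a97672c46da", "c5002330e84e"} {
            if err := deleteContainer(id, alive); err != nil {
                fmt.Println("teardown blocked:", err)
            }
        }
        // All three IDs were already gone, so, as in the log, three NotFound
        // records appear and the pod volumes dir is still cleaned up after.
    }
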
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:47:27 crc kubenswrapper[5102]: I0123 08:47:27.598453 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:47:27 crc kubenswrapper[5102]: E0123 08:47:27.599495 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:47:40 crc kubenswrapper[5102]: I0123 08:47:40.598530 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:47:40 crc kubenswrapper[5102]: E0123 08:47:40.600341 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:47:53 crc kubenswrapper[5102]: I0123 08:47:53.598672 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:47:53 crc kubenswrapper[5102]: E0123 08:47:53.599421 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:48:06 crc kubenswrapper[5102]: I0123 08:48:06.598451 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:48:06 crc kubenswrapper[5102]: E0123 08:48:06.599649 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.371029 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"] Jan 23 08:48:14 crc kubenswrapper[5102]: E0123 08:48:14.372007 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="registry-server" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.372027 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="registry-server" Jan 23 08:48:14 crc kubenswrapper[5102]: E0123 08:48:14.372063 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="extract-content" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.372074 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="extract-content" Jan 23 08:48:14 crc kubenswrapper[5102]: E0123 08:48:14.372115 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="extract-utilities" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.372128 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="extract-utilities" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.372380 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="adbf38c2-5d9e-41e1-b045-e3849e65c9a4" containerName="registry-server" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.374260 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.394465 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"] Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.443386 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.443429 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-qwdh7\" (UniqueName: \"kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.443452 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.544306 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-qwdh7\" (UniqueName: \"kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.544362 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.544455 5102 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.544971 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.545236 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.579509 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-qwdh7\" (UniqueName: \"kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7\") pod \"redhat-operators-zz6nt\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") " pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.696518 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:14 crc kubenswrapper[5102]: I0123 08:48:14.936239 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"] Jan 23 08:48:15 crc kubenswrapper[5102]: I0123 08:48:15.091182 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerStarted","Data":"d9e1a2114d9e6a91781e4928534397dbdaeb2ea0de8ddecf5a9167c63dca877c"} Jan 23 08:48:16 crc kubenswrapper[5102]: I0123 08:48:16.099093 5102 generic.go:334] "Generic (PLEG): container finished" podID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerID="e45d73d3530f1590dba7075b50e2927fe40c220d81f8db0aab1a85dbbf4aa770" exitCode=0 Jan 23 08:48:16 crc kubenswrapper[5102]: I0123 08:48:16.099140 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerDied","Data":"e45d73d3530f1590dba7075b50e2927fe40c220d81f8db0aab1a85dbbf4aa770"} Jan 23 08:48:17 crc kubenswrapper[5102]: I0123 08:48:17.107263 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerStarted","Data":"1cb922414ad2e77d71075ccad482e85f208bb4e04d23d4553e4a02438a5ee713"} Jan 23 08:48:18 crc kubenswrapper[5102]: I0123 08:48:18.116714 5102 generic.go:334] "Generic (PLEG): container finished" podID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerID="1cb922414ad2e77d71075ccad482e85f208bb4e04d23d4553e4a02438a5ee713" exitCode=0 Jan 23 08:48:18 crc kubenswrapper[5102]: I0123 08:48:18.116793 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" 
event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerDied","Data":"1cb922414ad2e77d71075ccad482e85f208bb4e04d23d4553e4a02438a5ee713"} Jan 23 08:48:19 crc kubenswrapper[5102]: I0123 08:48:19.127062 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerStarted","Data":"d52d3e40022a998e4decb75d0cf916d5c63d8770c18717275cb227ca3ba31515"} Jan 23 08:48:19 crc kubenswrapper[5102]: I0123 08:48:19.151940 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-zz6nt" podStartSLOduration=2.605762631 podStartE2EDuration="5.151919803s" podCreationTimestamp="2026-01-23 08:48:14 +0000 UTC" firstStartedPulling="2026-01-23 08:48:16.100859399 +0000 UTC m=+6846.921208374" lastFinishedPulling="2026-01-23 08:48:18.647016571 +0000 UTC m=+6849.467365546" observedRunningTime="2026-01-23 08:48:19.146514166 +0000 UTC m=+6849.966863141" watchObservedRunningTime="2026-01-23 08:48:19.151919803 +0000 UTC m=+6849.972268788" Jan 23 08:48:21 crc kubenswrapper[5102]: I0123 08:48:21.597890 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:48:21 crc kubenswrapper[5102]: E0123 08:48:21.598501 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:48:24 crc kubenswrapper[5102]: I0123 08:48:24.697630 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:24 crc kubenswrapper[5102]: I0123 08:48:24.699781 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-zz6nt" Jan 23 08:48:25 crc kubenswrapper[5102]: I0123 08:48:25.785946 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-zz6nt" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="registry-server" probeResult="failure" output=< Jan 23 08:48:25 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s Jan 23 08:48:25 crc kubenswrapper[5102]: > Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.443748 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"] Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.449441 5102 util.go:30] "No sandbox for pod can be found. 
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.457517 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"]
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.644322 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8ktmh\" (UniqueName: \"kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.644801 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.644917 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.746114 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8ktmh\" (UniqueName: \"kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.746193 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.746215 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.747268 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.747630 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:32 crc kubenswrapper[5102]: I0123 08:48:32.772471 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8ktmh\" (UniqueName: \"kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh\") pod \"redhat-marketplace-b7bbd\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") " pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:33 crc kubenswrapper[5102]: I0123 08:48:33.070248 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:33 crc kubenswrapper[5102]: I0123 08:48:33.513491 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"]
Jan 23 08:48:33 crc kubenswrapper[5102]: W0123 08:48:33.519724 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2ec5f4e4_ec21_4e9b_87b0_c9524d8858c8.slice/crio-770527f4d2145e27dd3bddf8758108099931649011e137948e375f251c8bc4e7 WatchSource:0}: Error finding container 770527f4d2145e27dd3bddf8758108099931649011e137948e375f251c8bc4e7: Status 404 returned error can't find the container with id 770527f4d2145e27dd3bddf8758108099931649011e137948e375f251c8bc4e7
Jan 23 08:48:34 crc kubenswrapper[5102]: I0123 08:48:34.254208 5102 generic.go:334] "Generic (PLEG): container finished" podID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerID="60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79" exitCode=0
Jan 23 08:48:34 crc kubenswrapper[5102]: I0123 08:48:34.254251 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerDied","Data":"60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79"}
Jan 23 08:48:34 crc kubenswrapper[5102]: I0123 08:48:34.254273 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerStarted","Data":"770527f4d2145e27dd3bddf8758108099931649011e137948e375f251c8bc4e7"}
Jan 23 08:48:34 crc kubenswrapper[5102]: I0123 08:48:34.748986 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-zz6nt"
Jan 23 08:48:34 crc kubenswrapper[5102]: I0123 08:48:34.794957 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-zz6nt"
Jan 23 08:48:35 crc kubenswrapper[5102]: I0123 08:48:35.263169 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerStarted","Data":"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb"}
Jan 23 08:48:35 crc kubenswrapper[5102]: I0123 08:48:35.598228 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8"
Jan 23 08:48:35 crc kubenswrapper[5102]: E0123 08:48:35.598735 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 08:48:36 crc kubenswrapper[5102]: I0123 08:48:36.271674 5102 generic.go:334] "Generic (PLEG): container finished" podID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerID="73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb" exitCode=0
Jan 23 08:48:36 crc kubenswrapper[5102]: I0123 08:48:36.271807 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerDied","Data":"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb"}
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.024817 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"]
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.025470 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-zz6nt" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="registry-server" containerID="cri-o://d52d3e40022a998e4decb75d0cf916d5c63d8770c18717275cb227ca3ba31515" gracePeriod=2
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.299044 5102 generic.go:334] "Generic (PLEG): container finished" podID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerID="d52d3e40022a998e4decb75d0cf916d5c63d8770c18717275cb227ca3ba31515" exitCode=0
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.299090 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerDied","Data":"d52d3e40022a998e4decb75d0cf916d5c63d8770c18717275cb227ca3ba31515"}
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.303144 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerStarted","Data":"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30"}
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.336399 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-b7bbd" podStartSLOduration=2.929180233 podStartE2EDuration="5.336376401s" podCreationTimestamp="2026-01-23 08:48:32 +0000 UTC" firstStartedPulling="2026-01-23 08:48:34.259222549 +0000 UTC m=+6865.079571524" lastFinishedPulling="2026-01-23 08:48:36.666418717 +0000 UTC m=+6867.486767692" observedRunningTime="2026-01-23 08:48:37.330647904 +0000 UTC m=+6868.150996909" watchObservedRunningTime="2026-01-23 08:48:37.336376401 +0000 UTC m=+6868.156725376"
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.431026 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6nt"
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.617162 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities\") pod \"4965c67c-fe24-459f-ad5f-14d2d222ca83\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") "
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.617337 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content\") pod \"4965c67c-fe24-459f-ad5f-14d2d222ca83\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") "
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.617437 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qwdh7\" (UniqueName: \"kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7\") pod \"4965c67c-fe24-459f-ad5f-14d2d222ca83\" (UID: \"4965c67c-fe24-459f-ad5f-14d2d222ca83\") "
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.620060 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities" (OuterVolumeSpecName: "utilities") pod "4965c67c-fe24-459f-ad5f-14d2d222ca83" (UID: "4965c67c-fe24-459f-ad5f-14d2d222ca83"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.627289 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7" (OuterVolumeSpecName: "kube-api-access-qwdh7") pod "4965c67c-fe24-459f-ad5f-14d2d222ca83" (UID: "4965c67c-fe24-459f-ad5f-14d2d222ca83"). InnerVolumeSpecName "kube-api-access-qwdh7". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.720040 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.720106 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qwdh7\" (UniqueName: \"kubernetes.io/projected/4965c67c-fe24-459f-ad5f-14d2d222ca83-kube-api-access-qwdh7\") on node \"crc\" DevicePath \"\""
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.764360 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "4965c67c-fe24-459f-ad5f-14d2d222ca83" (UID: "4965c67c-fe24-459f-ad5f-14d2d222ca83"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:48:37 crc kubenswrapper[5102]: I0123 08:48:37.821069 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4965c67c-fe24-459f-ad5f-14d2d222ca83-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.314505 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-zz6nt" event={"ID":"4965c67c-fe24-459f-ad5f-14d2d222ca83","Type":"ContainerDied","Data":"d9e1a2114d9e6a91781e4928534397dbdaeb2ea0de8ddecf5a9167c63dca877c"}
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.314658 5102 scope.go:117] "RemoveContainer" containerID="d52d3e40022a998e4decb75d0cf916d5c63d8770c18717275cb227ca3ba31515"
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.314698 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-zz6nt"
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.335405 5102 scope.go:117] "RemoveContainer" containerID="1cb922414ad2e77d71075ccad482e85f208bb4e04d23d4553e4a02438a5ee713"
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.359891 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"]
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.366181 5102 scope.go:117] "RemoveContainer" containerID="e45d73d3530f1590dba7075b50e2927fe40c220d81f8db0aab1a85dbbf4aa770"
Jan 23 08:48:38 crc kubenswrapper[5102]: I0123 08:48:38.368779 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-zz6nt"]
Jan 23 08:48:39 crc kubenswrapper[5102]: I0123 08:48:39.619187 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" path="/var/lib/kubelet/pods/4965c67c-fe24-459f-ad5f-14d2d222ca83/volumes"
Jan 23 08:48:43 crc kubenswrapper[5102]: I0123 08:48:43.071602 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:43 crc kubenswrapper[5102]: I0123 08:48:43.073147 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:43 crc kubenswrapper[5102]: I0123 08:48:43.140047 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:43 crc kubenswrapper[5102]: I0123 08:48:43.459265 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:43 crc kubenswrapper[5102]: I0123 08:48:43.505595 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"]
Jan 23 08:48:45 crc kubenswrapper[5102]: I0123 08:48:45.393897 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-b7bbd" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="registry-server" containerID="cri-o://f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30" gracePeriod=2
Jan 23 08:48:45 crc kubenswrapper[5102]: I0123 08:48:45.878578 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7bbd"
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.059368 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities\") pod \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") "
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.059515 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8ktmh\" (UniqueName: \"kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh\") pod \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") "
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.059766 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content\") pod \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\" (UID: \"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8\") "
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.060827 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities" (OuterVolumeSpecName: "utilities") pod "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" (UID: "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.066971 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh" (OuterVolumeSpecName: "kube-api-access-8ktmh") pod "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" (UID: "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8"). InnerVolumeSpecName "kube-api-access-8ktmh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.086699 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" (UID: "2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.161582 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8ktmh\" (UniqueName: \"kubernetes.io/projected/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-kube-api-access-8ktmh\") on node \"crc\" DevicePath \"\"" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.161622 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.161638 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.412156 5102 generic.go:334] "Generic (PLEG): container finished" podID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerID="f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30" exitCode=0 Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.412216 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerDied","Data":"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30"} Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.412255 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-b7bbd" event={"ID":"2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8","Type":"ContainerDied","Data":"770527f4d2145e27dd3bddf8758108099931649011e137948e375f251c8bc4e7"} Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.412255 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-b7bbd" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.412283 5102 scope.go:117] "RemoveContainer" containerID="f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.439917 5102 scope.go:117] "RemoveContainer" containerID="73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.455331 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"] Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.460623 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-b7bbd"] Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.472586 5102 scope.go:117] "RemoveContainer" containerID="60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.491191 5102 scope.go:117] "RemoveContainer" containerID="f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30" Jan 23 08:48:46 crc kubenswrapper[5102]: E0123 08:48:46.491906 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30\": container with ID starting with f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30 not found: ID does not exist" containerID="f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.491990 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30"} err="failed to get container status \"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30\": rpc error: code = NotFound desc = could not find container \"f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30\": container with ID starting with f00731f073febcec74ff50b613481115939b853950ef2c772e084c33851edc30 not found: ID does not exist" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.492043 5102 scope.go:117] "RemoveContainer" containerID="73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb" Jan 23 08:48:46 crc kubenswrapper[5102]: E0123 08:48:46.492692 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb\": container with ID starting with 73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb not found: ID does not exist" containerID="73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.492720 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb"} err="failed to get container status \"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb\": rpc error: code = NotFound desc = could not find container \"73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb\": container with ID starting with 73c0badfd5a6c5b9d9f198c0b91065fadf867c77bba9141767edb9a7e5ec3fdb not found: ID does not exist" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.492767 5102 scope.go:117] "RemoveContainer" 
containerID="60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79" Jan 23 08:48:46 crc kubenswrapper[5102]: E0123 08:48:46.493131 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79\": container with ID starting with 60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79 not found: ID does not exist" containerID="60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.493159 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79"} err="failed to get container status \"60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79\": rpc error: code = NotFound desc = could not find container \"60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79\": container with ID starting with 60c468529784e74c5fd29a0f04bbec1c719d6414bd57f088e7997cd9d4069a79 not found: ID does not exist" Jan 23 08:48:46 crc kubenswrapper[5102]: I0123 08:48:46.598797 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:48:46 crc kubenswrapper[5102]: E0123 08:48:46.599198 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:48:47 crc kubenswrapper[5102]: I0123 08:48:47.615582 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" path="/var/lib/kubelet/pods/2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8/volumes" Jan 23 08:48:59 crc kubenswrapper[5102]: I0123 08:48:59.609810 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:48:59 crc kubenswrapper[5102]: E0123 08:48:59.610974 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:49:11 crc kubenswrapper[5102]: I0123 08:49:11.598432 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:49:11 crc kubenswrapper[5102]: E0123 08:49:11.599640 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:49:24 crc kubenswrapper[5102]: I0123 08:49:24.598423 5102 scope.go:117] "RemoveContainer" 
containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:49:24 crc kubenswrapper[5102]: E0123 08:49:24.599375 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:49:36 crc kubenswrapper[5102]: I0123 08:49:36.597839 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:49:36 crc kubenswrapper[5102]: E0123 08:49:36.598763 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:49:49 crc kubenswrapper[5102]: I0123 08:49:49.603085 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:49:49 crc kubenswrapper[5102]: E0123 08:49:49.604147 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:50:02 crc kubenswrapper[5102]: I0123 08:50:02.597948 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:50:02 crc kubenswrapper[5102]: E0123 08:50:02.598716 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:50:15 crc kubenswrapper[5102]: I0123 08:50:15.598961 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:50:15 crc kubenswrapper[5102]: E0123 08:50:15.600007 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:50:27 crc kubenswrapper[5102]: I0123 08:50:27.598674 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:50:27 crc kubenswrapper[5102]: E0123 08:50:27.599423 5102 pod_workers.go:1301] "Error syncing pod, 
skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:50:42 crc kubenswrapper[5102]: I0123 08:50:42.598279 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:50:42 crc kubenswrapper[5102]: E0123 08:50:42.599179 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:50:53 crc kubenswrapper[5102]: I0123 08:50:53.598492 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:50:53 crc kubenswrapper[5102]: E0123 08:50:53.599464 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:51:04 crc kubenswrapper[5102]: I0123 08:51:04.598262 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:51:04 crc kubenswrapper[5102]: E0123 08:51:04.599309 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:51:17 crc kubenswrapper[5102]: I0123 08:51:17.599170 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:51:18 crc kubenswrapper[5102]: I0123 08:51:18.396244 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf"} Jan 23 08:53:46 crc kubenswrapper[5102]: I0123 08:53:46.768501 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:53:46 crc kubenswrapper[5102]: I0123 08:53:46.769024 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" 
probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:54:16 crc kubenswrapper[5102]: I0123 08:54:16.769437 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:54:16 crc kubenswrapper[5102]: I0123 08:54:16.770090 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.492252 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493228 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="extract-utilities" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493244 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="extract-utilities" Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493259 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="extract-content" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493266 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="extract-content" Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493282 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="extract-utilities" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493289 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="extract-utilities" Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493302 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="extract-content" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493309 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="extract-content" Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493328 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493335 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: E0123 08:54:30.493355 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493362 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493584 5102 memory_manager.go:354] "RemoveStaleState removing state" 
podUID="4965c67c-fe24-459f-ad5f-14d2d222ca83" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.493599 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2ec5f4e4-ec21-4e9b-87b0-c9524d8858c8" containerName="registry-server" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.494754 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.516064 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.678174 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.678480 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kdv2d\" (UniqueName: \"kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.678620 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.780312 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kdv2d\" (UniqueName: \"kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.780435 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.780501 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.781681 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.781702 5102 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.804951 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kdv2d\" (UniqueName: \"kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d\") pod \"community-operators-9dcxt\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:30 crc kubenswrapper[5102]: I0123 08:54:30.820592 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:31 crc kubenswrapper[5102]: I0123 08:54:31.316312 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:31 crc kubenswrapper[5102]: I0123 08:54:31.848276 5102 generic.go:334] "Generic (PLEG): container finished" podID="2375b97f-507c-4b64-bee3-3604b57949b5" containerID="ad7c992057cfcb56fb1264096463fc4a1dcbe64678ccfacd719c55d6ee951aa4" exitCode=0 Jan 23 08:54:31 crc kubenswrapper[5102]: I0123 08:54:31.848319 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerDied","Data":"ad7c992057cfcb56fb1264096463fc4a1dcbe64678ccfacd719c55d6ee951aa4"} Jan 23 08:54:31 crc kubenswrapper[5102]: I0123 08:54:31.848363 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerStarted","Data":"05b0d91431aacefe9ae4c8e7898a90f81f643a849a2472bef82f344a1e0bce57"} Jan 23 08:54:31 crc kubenswrapper[5102]: I0123 08:54:31.851835 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 08:54:33 crc kubenswrapper[5102]: I0123 08:54:33.864695 5102 generic.go:334] "Generic (PLEG): container finished" podID="2375b97f-507c-4b64-bee3-3604b57949b5" containerID="8d53a07d49bb24495b40e0df740fee2ac118a389112712787b04f65311a48278" exitCode=0 Jan 23 08:54:33 crc kubenswrapper[5102]: I0123 08:54:33.864738 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerDied","Data":"8d53a07d49bb24495b40e0df740fee2ac118a389112712787b04f65311a48278"} Jan 23 08:54:34 crc kubenswrapper[5102]: I0123 08:54:34.876044 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerStarted","Data":"d3afee7341ee0156017c740fe8b803029b88208724b8c41c975c4b224e53ce5a"} Jan 23 08:54:34 crc kubenswrapper[5102]: I0123 08:54:34.905308 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-9dcxt" podStartSLOduration=2.343374969 podStartE2EDuration="4.905278379s" podCreationTimestamp="2026-01-23 08:54:30 +0000 UTC" firstStartedPulling="2026-01-23 08:54:31.851585491 +0000 UTC m=+7222.671934466" lastFinishedPulling="2026-01-23 08:54:34.413488891 +0000 UTC m=+7225.233837876" observedRunningTime="2026-01-23 
08:54:34.898355095 +0000 UTC m=+7225.718704080" watchObservedRunningTime="2026-01-23 08:54:34.905278379 +0000 UTC m=+7225.725627354" Jan 23 08:54:40 crc kubenswrapper[5102]: I0123 08:54:40.821150 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:40 crc kubenswrapper[5102]: I0123 08:54:40.821439 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:40 crc kubenswrapper[5102]: I0123 08:54:40.862656 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:40 crc kubenswrapper[5102]: I0123 08:54:40.955257 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:44 crc kubenswrapper[5102]: I0123 08:54:44.280464 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:44 crc kubenswrapper[5102]: I0123 08:54:44.281188 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-9dcxt" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="registry-server" containerID="cri-o://d3afee7341ee0156017c740fe8b803029b88208724b8c41c975c4b224e53ce5a" gracePeriod=2 Jan 23 08:54:45 crc kubenswrapper[5102]: I0123 08:54:45.961815 5102 generic.go:334] "Generic (PLEG): container finished" podID="2375b97f-507c-4b64-bee3-3604b57949b5" containerID="d3afee7341ee0156017c740fe8b803029b88208724b8c41c975c4b224e53ce5a" exitCode=0 Jan 23 08:54:45 crc kubenswrapper[5102]: I0123 08:54:45.961918 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerDied","Data":"d3afee7341ee0156017c740fe8b803029b88208724b8c41c975c4b224e53ce5a"}
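
The "Killing container with a grace period" records (gracePeriod=2 for the catalog pods here, gracePeriod=600 for machine-config-daemon later in this log) describe the standard termination sequence: deliver SIGTERM, wait up to the grace period, then force-kill. Kubelet delegates the actual signalling to the runtime (cri-o here), but the pattern itself is the familiar one; a minimal sketch for a local process, assuming a POSIX system (the sleep command stands in for the container process):

package main

import (
	"os"
	"os/exec"
	"syscall"
	"time"
)

// stopWithGrace mimics the sequence in the records above: SIGTERM first,
// then SIGKILL if the process outlives the grace period.
func stopWithGrace(p *os.Process, grace time.Duration) {
	_ = p.Signal(syscall.SIGTERM)
	done := make(chan struct{})
	go func() { p.Wait(); close(done) }()
	select {
	case <-done: // exited within the grace period
	case <-time.After(grace):
		_ = p.Kill() // grace period (2s or 600s above) expired
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	_ = cmd.Start()
	stopWithGrace(cmd.Process, 2*time.Second)
}

Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.519883 5102 util.go:48] "No ready sandbox for pod can be found. 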
Need to start a new one" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.552246 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities\") pod \"2375b97f-507c-4b64-bee3-3604b57949b5\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.552350 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kdv2d\" (UniqueName: \"kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d\") pod \"2375b97f-507c-4b64-bee3-3604b57949b5\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.552449 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content\") pod \"2375b97f-507c-4b64-bee3-3604b57949b5\" (UID: \"2375b97f-507c-4b64-bee3-3604b57949b5\") " Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.554179 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities" (OuterVolumeSpecName: "utilities") pod "2375b97f-507c-4b64-bee3-3604b57949b5" (UID: "2375b97f-507c-4b64-bee3-3604b57949b5"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.559620 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d" (OuterVolumeSpecName: "kube-api-access-kdv2d") pod "2375b97f-507c-4b64-bee3-3604b57949b5" (UID: "2375b97f-507c-4b64-bee3-3604b57949b5"). InnerVolumeSpecName "kube-api-access-kdv2d". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.608487 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2375b97f-507c-4b64-bee3-3604b57949b5" (UID: "2375b97f-507c-4b64-bee3-3604b57949b5"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.653177 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.653216 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2375b97f-507c-4b64-bee3-3604b57949b5-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.653228 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kdv2d\" (UniqueName: \"kubernetes.io/projected/2375b97f-507c-4b64-bee3-3604b57949b5-kube-api-access-kdv2d\") on node \"crc\" DevicePath \"\"" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.769250 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.769369 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.769454 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.770496 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.770677 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf" gracePeriod=600 Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.971058 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-9dcxt" event={"ID":"2375b97f-507c-4b64-bee3-3604b57949b5","Type":"ContainerDied","Data":"05b0d91431aacefe9ae4c8e7898a90f81f643a849a2472bef82f344a1e0bce57"} Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.971116 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-9dcxt" Jan 23 08:54:46 crc kubenswrapper[5102]: I0123 08:54:46.971127 5102 scope.go:117] "RemoveContainer" containerID="d3afee7341ee0156017c740fe8b803029b88208724b8c41c975c4b224e53ce5a" Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.003953 5102 scope.go:117] "RemoveContainer" containerID="8d53a07d49bb24495b40e0df740fee2ac118a389112712787b04f65311a48278" Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.004931 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.011420 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-9dcxt"] Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.023357 5102 scope.go:117] "RemoveContainer" containerID="ad7c992057cfcb56fb1264096463fc4a1dcbe64678ccfacd719c55d6ee951aa4" Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.617751 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" path="/var/lib/kubelet/pods/2375b97f-507c-4b64-bee3-3604b57949b5/volumes" Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.986080 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf" exitCode=0 Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.986146 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf"} Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.986246 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b"} Jan 23 08:54:47 crc kubenswrapper[5102]: I0123 08:54:47.986276 5102 scope.go:117] "RemoveContainer" containerID="a1dca46da6b778f92e7dbf3ace0e5f0a196321203bfab6e1caf36703346b4db8" Jan 23 08:57:16 crc kubenswrapper[5102]: I0123 08:57:16.768527 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:57:16 crc kubenswrapper[5102]: I0123 08:57:16.769111 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:57:46 crc kubenswrapper[5102]: I0123 08:57:46.768196 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:57:46 crc kubenswrapper[5102]: I0123 08:57:46.768676 5102 prober.go:107] "Probe failed" probeType="Liveness" 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:58:16 crc kubenswrapper[5102]: I0123 08:58:16.768628 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 08:58:16 crc kubenswrapper[5102]: I0123 08:58:16.769149 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 08:58:16 crc kubenswrapper[5102]: I0123 08:58:16.769193 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 08:58:16 crc kubenswrapper[5102]: I0123 08:58:16.769893 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 08:58:16 crc kubenswrapper[5102]: I0123 08:58:16.769943 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" gracePeriod=600 Jan 23 08:58:16 crc kubenswrapper[5102]: E0123 08:58:16.900281 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:58:17 crc kubenswrapper[5102]: I0123 08:58:17.661074 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" exitCode=0 Jan 23 08:58:17 crc kubenswrapper[5102]: I0123 08:58:17.661152 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b"} Jan 23 08:58:17 crc kubenswrapper[5102]: I0123 08:58:17.661258 5102 scope.go:117] "RemoveContainer" containerID="84de42ea50cd8ce1283bc16736ac9825425c3fb7b7ba8fbbf0651232b6de97cf" Jan 23 08:58:17 crc kubenswrapper[5102]: I0123 08:58:17.662237 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:58:17 crc kubenswrapper[5102]: E0123 
08:58:17.662819 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:58:32 crc kubenswrapper[5102]: I0123 08:58:32.598658 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:58:32 crc kubenswrapper[5102]: E0123 08:58:32.599230 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:58:45 crc kubenswrapper[5102]: I0123 08:58:45.597652 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:58:45 crc kubenswrapper[5102]: E0123 08:58:45.598307 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:59:00 crc kubenswrapper[5102]: I0123 08:59:00.599355 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:59:00 crc kubenswrapper[5102]: E0123 08:59:00.600940 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.006983 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:03 crc kubenswrapper[5102]: E0123 08:59:03.007458 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="extract-content" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.007480 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="extract-content" Jan 23 08:59:03 crc kubenswrapper[5102]: E0123 08:59:03.007496 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="registry-server" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.007503 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="registry-server" Jan 23 08:59:03 crc kubenswrapper[5102]: E0123 08:59:03.007514 5102 cpu_manager.go:410] "RemoveStaleState: removing container" 
podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="extract-utilities" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.007521 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="extract-utilities" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.007693 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2375b97f-507c-4b64-bee3-3604b57949b5" containerName="registry-server" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.009290 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.026976 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.140145 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.140350 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlpb6\" (UniqueName: \"kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.140388 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.241400 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.241476 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlpb6\" (UniqueName: \"kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.241503 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.242115 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities\") pod \"redhat-operators-cwzhs\" (UID: 
\"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.242333 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.266784 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlpb6\" (UniqueName: \"kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6\") pod \"redhat-operators-cwzhs\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.337978 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:03 crc kubenswrapper[5102]: I0123 08:59:03.827770 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:04 crc kubenswrapper[5102]: I0123 08:59:04.052649 5102 generic.go:334] "Generic (PLEG): container finished" podID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerID="d932e5a68d608462f3f2296a569da6c345fb4c9e8b5fbf540629cf5555f0603a" exitCode=0 Jan 23 08:59:04 crc kubenswrapper[5102]: I0123 08:59:04.052700 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerDied","Data":"d932e5a68d608462f3f2296a569da6c345fb4c9e8b5fbf540629cf5555f0603a"} Jan 23 08:59:04 crc kubenswrapper[5102]: I0123 08:59:04.052733 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerStarted","Data":"4a528d0a6aa5fd710810d70602024e1da902e1294f34e130d6a7a9062b42b6e6"} Jan 23 08:59:05 crc kubenswrapper[5102]: I0123 08:59:05.061104 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerStarted","Data":"59384fbd4497b97210739f2d59cd9c6e43ae5ff2af4781693e8ac8c5169e3b8a"} Jan 23 08:59:06 crc kubenswrapper[5102]: I0123 08:59:06.070892 5102 generic.go:334] "Generic (PLEG): container finished" podID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerID="59384fbd4497b97210739f2d59cd9c6e43ae5ff2af4781693e8ac8c5169e3b8a" exitCode=0 Jan 23 08:59:06 crc kubenswrapper[5102]: I0123 08:59:06.071104 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerDied","Data":"59384fbd4497b97210739f2d59cd9c6e43ae5ff2af4781693e8ac8c5169e3b8a"} Jan 23 08:59:07 crc kubenswrapper[5102]: I0123 08:59:07.080051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerStarted","Data":"f2b054d4833614317db5d6118ee62d0c52db3a3154cc4395118fcfad0bd3df21"} Jan 23 08:59:07 crc kubenswrapper[5102]: I0123 08:59:07.099460 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cwzhs" 
podStartSLOduration=2.409080494 podStartE2EDuration="5.099438176s" podCreationTimestamp="2026-01-23 08:59:02 +0000 UTC" firstStartedPulling="2026-01-23 08:59:04.054837319 +0000 UTC m=+7494.875186294" lastFinishedPulling="2026-01-23 08:59:06.745194961 +0000 UTC m=+7497.565543976" observedRunningTime="2026-01-23 08:59:07.09925854 +0000 UTC m=+7497.919607515" watchObservedRunningTime="2026-01-23 08:59:07.099438176 +0000 UTC m=+7497.919787161" Jan 23 08:59:13 crc kubenswrapper[5102]: I0123 08:59:13.338373 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:13 crc kubenswrapper[5102]: I0123 08:59:13.339216 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:13 crc kubenswrapper[5102]: I0123 08:59:13.390874 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:13 crc kubenswrapper[5102]: I0123 08:59:13.597871 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:59:13 crc kubenswrapper[5102]: E0123 08:59:13.598322 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.173434 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.488368 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.490676 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.505477 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.617060 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.617132 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zrsg2\" (UniqueName: \"kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.617193 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.719108 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.719186 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zrsg2\" (UniqueName: \"kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.719242 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.719885 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.719893 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.749202 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-zrsg2\" (UniqueName: \"kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2\") pod \"certified-operators-ctfsz\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:14 crc kubenswrapper[5102]: I0123 08:59:14.814788 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.141214 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.480244 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.485701 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.499581 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.632676 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x69bx\" (UniqueName: \"kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.632938 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.632962 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.733994 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x69bx\" (UniqueName: \"kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.734069 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.734097 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content\") pod \"redhat-marketplace-8wgkc\" (UID: 
\"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.734640 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.734726 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.757492 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x69bx\" (UniqueName: \"kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx\") pod \"redhat-marketplace-8wgkc\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:15 crc kubenswrapper[5102]: I0123 08:59:15.815098 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:16 crc kubenswrapper[5102]: I0123 08:59:16.144048 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerStarted","Data":"d258a13f02219a3789e6b3b19f63aa9fec706da281e1afbf440c17e6c96eac03"} Jan 23 08:59:16 crc kubenswrapper[5102]: I0123 08:59:16.144414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerStarted","Data":"b77a9d523cd32fa654eed0d9de621e325fc228a9b806528c1daef29ffd39137e"} Jan 23 08:59:16 crc kubenswrapper[5102]: I0123 08:59:16.279955 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:16 crc kubenswrapper[5102]: W0123 08:59:16.286006 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod86bfb643_ce78_48a4_8b3a_8a1841552699.slice/crio-90dd10025db576c2bb998c9c29e4d181c16f835275fd43c143a01dfbff34df0a WatchSource:0}: Error finding container 90dd10025db576c2bb998c9c29e4d181c16f835275fd43c143a01dfbff34df0a: Status 404 returned error can't find the container with id 90dd10025db576c2bb998c9c29e4d181c16f835275fd43c143a01dfbff34df0a Jan 23 08:59:16 crc kubenswrapper[5102]: I0123 08:59:16.878071 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:16 crc kubenswrapper[5102]: I0123 08:59:16.878467 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cwzhs" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="registry-server" containerID="cri-o://f2b054d4833614317db5d6118ee62d0c52db3a3154cc4395118fcfad0bd3df21" gracePeriod=2 Jan 23 08:59:17 crc kubenswrapper[5102]: I0123 08:59:17.154384 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" 
event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerStarted","Data":"90dd10025db576c2bb998c9c29e4d181c16f835275fd43c143a01dfbff34df0a"} Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.169937 5102 generic.go:334] "Generic (PLEG): container finished" podID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerID="f2b054d4833614317db5d6118ee62d0c52db3a3154cc4395118fcfad0bd3df21" exitCode=0 Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.170021 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerDied","Data":"f2b054d4833614317db5d6118ee62d0c52db3a3154cc4395118fcfad0bd3df21"} Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.182395 5102 generic.go:334] "Generic (PLEG): container finished" podID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerID="d258a13f02219a3789e6b3b19f63aa9fec706da281e1afbf440c17e6c96eac03" exitCode=0 Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.184626 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerDied","Data":"d258a13f02219a3789e6b3b19f63aa9fec706da281e1afbf440c17e6c96eac03"} Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.190851 5102 generic.go:334] "Generic (PLEG): container finished" podID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerID="4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22" exitCode=0 Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.190928 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerDied","Data":"4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22"} Jan 23 08:59:18 crc kubenswrapper[5102]: I0123 08:59:18.969151 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.098741 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlpb6\" (UniqueName: \"kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6\") pod \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.098957 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content\") pod \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.099085 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities\") pod \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\" (UID: \"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626\") " Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.100507 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities" (OuterVolumeSpecName: "utilities") pod "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" (UID: "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.111431 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6" (OuterVolumeSpecName: "kube-api-access-vlpb6") pod "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" (UID: "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626"). InnerVolumeSpecName "kube-api-access-vlpb6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.200482 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.200519 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlpb6\" (UniqueName: \"kubernetes.io/projected/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-kube-api-access-vlpb6\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.201377 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cwzhs" event={"ID":"14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626","Type":"ContainerDied","Data":"4a528d0a6aa5fd710810d70602024e1da902e1294f34e130d6a7a9062b42b6e6"} Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.201438 5102 scope.go:117] "RemoveContainer" containerID="f2b054d4833614317db5d6118ee62d0c52db3a3154cc4395118fcfad0bd3df21" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.201435 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cwzhs" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.202638 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerStarted","Data":"de0f6b3504618532f2f1a641243b7661c153a78e692d034fe963a743e8b21a6c"} Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.236053 5102 scope.go:117] "RemoveContainer" containerID="59384fbd4497b97210739f2d59cd9c6e43ae5ff2af4781693e8ac8c5169e3b8a" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.237279 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" (UID: "14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.262416 5102 scope.go:117] "RemoveContainer" containerID="d932e5a68d608462f3f2296a569da6c345fb4c9e8b5fbf540629cf5555f0603a" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.302051 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:19 crc kubenswrapper[5102]: E0123 08:59:19.452865 5102 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod176e72b8_f4e6_48dd_bcb3_c865a81e8ce7.slice/crio-conmon-de0f6b3504618532f2f1a641243b7661c153a78e692d034fe963a743e8b21a6c.scope\": RecentStats: unable to find data in memory cache]" Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.537131 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.542904 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cwzhs"] Jan 23 08:59:19 crc kubenswrapper[5102]: I0123 08:59:19.612216 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" path="/var/lib/kubelet/pods/14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626/volumes" Jan 23 08:59:20 crc kubenswrapper[5102]: I0123 08:59:20.216198 5102 generic.go:334] "Generic (PLEG): container finished" podID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerID="de0f6b3504618532f2f1a641243b7661c153a78e692d034fe963a743e8b21a6c" exitCode=0 Jan 23 08:59:20 crc kubenswrapper[5102]: I0123 08:59:20.216254 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerDied","Data":"de0f6b3504618532f2f1a641243b7661c153a78e692d034fe963a743e8b21a6c"} Jan 23 08:59:21 crc kubenswrapper[5102]: I0123 08:59:21.227389 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerStarted","Data":"edf109412b24a4da269ba8e7a1dfed444fbc99dde1b8005aa9ae822ab0fac735"} Jan 23 08:59:21 crc kubenswrapper[5102]: I0123 08:59:21.260629 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ctfsz" podStartSLOduration=4.588661913 podStartE2EDuration="7.260611804s" podCreationTimestamp="2026-01-23 08:59:14 +0000 UTC" firstStartedPulling="2026-01-23 08:59:18.19102256 +0000 UTC m=+7509.011371575" lastFinishedPulling="2026-01-23 08:59:20.862972481 +0000 UTC m=+7511.683321466" observedRunningTime="2026-01-23 08:59:21.256772065 +0000 UTC m=+7512.077121070" watchObservedRunningTime="2026-01-23 08:59:21.260611804 +0000 UTC m=+7512.080960789" Jan 23 08:59:23 crc kubenswrapper[5102]: I0123 08:59:23.242819 5102 generic.go:334] "Generic (PLEG): container finished" podID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerID="de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103" exitCode=0 Jan 23 08:59:23 crc kubenswrapper[5102]: I0123 08:59:23.242935 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" 
event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerDied","Data":"de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103"} Jan 23 08:59:24 crc kubenswrapper[5102]: I0123 08:59:24.252655 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerStarted","Data":"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd"} Jan 23 08:59:24 crc kubenswrapper[5102]: I0123 08:59:24.274827 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8wgkc" podStartSLOduration=3.817770231 podStartE2EDuration="9.274804926s" podCreationTimestamp="2026-01-23 08:59:15 +0000 UTC" firstStartedPulling="2026-01-23 08:59:18.194110986 +0000 UTC m=+7509.014459961" lastFinishedPulling="2026-01-23 08:59:23.651145671 +0000 UTC m=+7514.471494656" observedRunningTime="2026-01-23 08:59:24.26880399 +0000 UTC m=+7515.089152975" watchObservedRunningTime="2026-01-23 08:59:24.274804926 +0000 UTC m=+7515.095153901" Jan 23 08:59:24 crc kubenswrapper[5102]: I0123 08:59:24.815031 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:24 crc kubenswrapper[5102]: I0123 08:59:24.815321 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:24 crc kubenswrapper[5102]: I0123 08:59:24.874651 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:25 crc kubenswrapper[5102]: I0123 08:59:25.815992 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:25 crc kubenswrapper[5102]: I0123 08:59:25.817315 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:25 crc kubenswrapper[5102]: I0123 08:59:25.872347 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:26 crc kubenswrapper[5102]: I0123 08:59:26.598734 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:59:26 crc kubenswrapper[5102]: E0123 08:59:26.599557 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:59:34 crc kubenswrapper[5102]: I0123 08:59:34.855450 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:34 crc kubenswrapper[5102]: I0123 08:59:34.905333 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:35 crc kubenswrapper[5102]: I0123 08:59:35.338865 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ctfsz" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="registry-server" 
containerID="cri-o://edf109412b24a4da269ba8e7a1dfed444fbc99dde1b8005aa9ae822ab0fac735" gracePeriod=2 Jan 23 08:59:35 crc kubenswrapper[5102]: I0123 08:59:35.862564 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.348964 5102 generic.go:334] "Generic (PLEG): container finished" podID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerID="edf109412b24a4da269ba8e7a1dfed444fbc99dde1b8005aa9ae822ab0fac735" exitCode=0 Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.349013 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerDied","Data":"edf109412b24a4da269ba8e7a1dfed444fbc99dde1b8005aa9ae822ab0fac735"} Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.497843 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.498078 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8wgkc" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="registry-server" containerID="cri-o://1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd" gracePeriod=2 Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.828363 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.898842 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967215 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities\") pod \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967319 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x69bx\" (UniqueName: \"kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx\") pod \"86bfb643-ce78-48a4-8b3a-8a1841552699\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967341 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content\") pod \"86bfb643-ce78-48a4-8b3a-8a1841552699\" (UID: \"86bfb643-ce78-48a4-8b3a-8a1841552699\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967361 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content\") pod \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967382 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities\") pod \"86bfb643-ce78-48a4-8b3a-8a1841552699\" (UID: 
\"86bfb643-ce78-48a4-8b3a-8a1841552699\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.967427 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zrsg2\" (UniqueName: \"kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2\") pod \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\" (UID: \"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7\") " Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.968131 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities" (OuterVolumeSpecName: "utilities") pod "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" (UID: "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.968842 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities" (OuterVolumeSpecName: "utilities") pod "86bfb643-ce78-48a4-8b3a-8a1841552699" (UID: "86bfb643-ce78-48a4-8b3a-8a1841552699"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.972641 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx" (OuterVolumeSpecName: "kube-api-access-x69bx") pod "86bfb643-ce78-48a4-8b3a-8a1841552699" (UID: "86bfb643-ce78-48a4-8b3a-8a1841552699"). InnerVolumeSpecName "kube-api-access-x69bx". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.974893 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2" (OuterVolumeSpecName: "kube-api-access-zrsg2") pod "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" (UID: "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7"). InnerVolumeSpecName "kube-api-access-zrsg2". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 08:59:36 crc kubenswrapper[5102]: I0123 08:59:36.996281 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "86bfb643-ce78-48a4-8b3a-8a1841552699" (UID: "86bfb643-ce78-48a4-8b3a-8a1841552699"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.023330 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" (UID: "176e72b8-f4e6-48dd-bcb3-c865a81e8ce7"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069460 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069503 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x69bx\" (UniqueName: \"kubernetes.io/projected/86bfb643-ce78-48a4-8b3a-8a1841552699-kube-api-access-x69bx\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069516 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069526 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069537 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/86bfb643-ce78-48a4-8b3a-8a1841552699-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.069580 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zrsg2\" (UniqueName: \"kubernetes.io/projected/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7-kube-api-access-zrsg2\") on node \"crc\" DevicePath \"\"" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.360419 5102 generic.go:334] "Generic (PLEG): container finished" podID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerID="1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd" exitCode=0 Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.360502 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerDied","Data":"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd"} Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.360531 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8wgkc" event={"ID":"86bfb643-ce78-48a4-8b3a-8a1841552699","Type":"ContainerDied","Data":"90dd10025db576c2bb998c9c29e4d181c16f835275fd43c143a01dfbff34df0a"} Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.360568 5102 scope.go:117] "RemoveContainer" containerID="1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.361095 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8wgkc" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.362963 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ctfsz" event={"ID":"176e72b8-f4e6-48dd-bcb3-c865a81e8ce7","Type":"ContainerDied","Data":"b77a9d523cd32fa654eed0d9de621e325fc228a9b806528c1daef29ffd39137e"} Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.363046 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ctfsz" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.391355 5102 scope.go:117] "RemoveContainer" containerID="de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.400404 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.406043 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ctfsz"] Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.419571 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.422169 5102 scope.go:117] "RemoveContainer" containerID="4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.424401 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8wgkc"] Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.436956 5102 scope.go:117] "RemoveContainer" containerID="1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd" Jan 23 08:59:37 crc kubenswrapper[5102]: E0123 08:59:37.437380 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd\": container with ID starting with 1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd not found: ID does not exist" containerID="1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.437412 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd"} err="failed to get container status \"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd\": rpc error: code = NotFound desc = could not find container \"1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd\": container with ID starting with 1c9cb716e332841bd7f076caa2670adcd404d9680a5e53a9ad201e2a0ae8b1cd not found: ID does not exist" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.437435 5102 scope.go:117] "RemoveContainer" containerID="de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103" Jan 23 08:59:37 crc kubenswrapper[5102]: E0123 08:59:37.437727 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103\": container with ID starting with de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103 not found: ID does not exist" containerID="de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.437749 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103"} err="failed to get container status \"de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103\": rpc error: code = NotFound desc = could not find container \"de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103\": container with ID starting with 
de83886e71d5d475366cb44c3c0537139c45c314ed460fa274257fbde3de9103 not found: ID does not exist" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.437760 5102 scope.go:117] "RemoveContainer" containerID="4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22" Jan 23 08:59:37 crc kubenswrapper[5102]: E0123 08:59:37.438023 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22\": container with ID starting with 4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22 not found: ID does not exist" containerID="4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.438052 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22"} err="failed to get container status \"4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22\": rpc error: code = NotFound desc = could not find container \"4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22\": container with ID starting with 4d2b41e810b4b645c9bf4935c0417ac947e6ff2958fad84d642c8efc55136d22 not found: ID does not exist" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.438070 5102 scope.go:117] "RemoveContainer" containerID="edf109412b24a4da269ba8e7a1dfed444fbc99dde1b8005aa9ae822ab0fac735" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.452404 5102 scope.go:117] "RemoveContainer" containerID="de0f6b3504618532f2f1a641243b7661c153a78e692d034fe963a743e8b21a6c" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.469955 5102 scope.go:117] "RemoveContainer" containerID="d258a13f02219a3789e6b3b19f63aa9fec706da281e1afbf440c17e6c96eac03" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.606914 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" path="/var/lib/kubelet/pods/176e72b8-f4e6-48dd-bcb3-c865a81e8ce7/volumes" Jan 23 08:59:37 crc kubenswrapper[5102]: I0123 08:59:37.607768 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" path="/var/lib/kubelet/pods/86bfb643-ce78-48a4-8b3a-8a1841552699/volumes" Jan 23 08:59:39 crc kubenswrapper[5102]: I0123 08:59:39.601672 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:59:39 crc kubenswrapper[5102]: E0123 08:59:39.602235 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 08:59:51 crc kubenswrapper[5102]: I0123 08:59:51.598362 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 08:59:51 crc kubenswrapper[5102]: E0123 08:59:51.599227 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon 
pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.182663 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj"] Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183479 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183495 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183505 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183512 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183525 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183532 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183565 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183573 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183587 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183594 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183606 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183613 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183626 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183634 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="extract-content" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183643 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183651 5102 state_mem.go:107] "Deleted CPUSet assignment" 
podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: E0123 09:00:00.183661 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183667 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="extract-utilities" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183848 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="14ddf7e7-b713-4e4d-a4e3-b4b7f9c28626" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183866 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="86bfb643-ce78-48a4-8b3a-8a1841552699" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.183880 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="176e72b8-f4e6-48dd-bcb3-c865a81e8ce7" containerName="registry-server" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.184443 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.187007 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.190239 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.194929 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj"] Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.307853 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.308260 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.308325 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-skw2v\" (UniqueName: \"kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.409633 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: 
\"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.409734 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-skw2v\" (UniqueName: \"kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.409781 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.410693 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.415949 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.428654 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-skw2v\" (UniqueName: \"kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v\") pod \"collect-profiles-29485980-8lrpj\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.507864 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:00 crc kubenswrapper[5102]: I0123 09:00:00.915473 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj"] Jan 23 09:00:00 crc kubenswrapper[5102]: W0123 09:00:00.921176 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb8f25616_6d28_4af7_afeb_50164627f596.slice/crio-42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08 WatchSource:0}: Error finding container 42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08: Status 404 returned error can't find the container with id 42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08 Jan 23 09:00:01 crc kubenswrapper[5102]: I0123 09:00:01.571578 5102 generic.go:334] "Generic (PLEG): container finished" podID="b8f25616-6d28-4af7-afeb-50164627f596" containerID="681ab1f24efbefdc444be349655c16dd4b6bcb74d3f4e581184049f1d3676035" exitCode=0 Jan 23 09:00:01 crc kubenswrapper[5102]: I0123 09:00:01.571648 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" event={"ID":"b8f25616-6d28-4af7-afeb-50164627f596","Type":"ContainerDied","Data":"681ab1f24efbefdc444be349655c16dd4b6bcb74d3f4e581184049f1d3676035"} Jan 23 09:00:01 crc kubenswrapper[5102]: I0123 09:00:01.571892 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" event={"ID":"b8f25616-6d28-4af7-afeb-50164627f596","Type":"ContainerStarted","Data":"42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08"} Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.036162 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.148319 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-skw2v\" (UniqueName: \"kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v\") pod \"b8f25616-6d28-4af7-afeb-50164627f596\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.148385 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume\") pod \"b8f25616-6d28-4af7-afeb-50164627f596\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.148402 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume\") pod \"b8f25616-6d28-4af7-afeb-50164627f596\" (UID: \"b8f25616-6d28-4af7-afeb-50164627f596\") " Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.149244 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume" (OuterVolumeSpecName: "config-volume") pod "b8f25616-6d28-4af7-afeb-50164627f596" (UID: "b8f25616-6d28-4af7-afeb-50164627f596"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.153612 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b8f25616-6d28-4af7-afeb-50164627f596" (UID: "b8f25616-6d28-4af7-afeb-50164627f596"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.154579 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v" (OuterVolumeSpecName: "kube-api-access-skw2v") pod "b8f25616-6d28-4af7-afeb-50164627f596" (UID: "b8f25616-6d28-4af7-afeb-50164627f596"). InnerVolumeSpecName "kube-api-access-skw2v". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.249607 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-skw2v\" (UniqueName: \"kubernetes.io/projected/b8f25616-6d28-4af7-afeb-50164627f596-kube-api-access-skw2v\") on node \"crc\" DevicePath \"\"" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.249643 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b8f25616-6d28-4af7-afeb-50164627f596-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.249656 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b8f25616-6d28-4af7-afeb-50164627f596-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.591580 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" event={"ID":"b8f25616-6d28-4af7-afeb-50164627f596","Type":"ContainerDied","Data":"42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08"} Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.591620 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="42651423e0b1a7010039c4c8059ab153f4b8da3dc93ff9c3642a3492be1b7e08" Jan 23 09:00:03 crc kubenswrapper[5102]: I0123 09:00:03.591652 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485980-8lrpj" Jan 23 09:00:04 crc kubenswrapper[5102]: I0123 09:00:04.115434 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f"] Jan 23 09:00:04 crc kubenswrapper[5102]: I0123 09:00:04.124179 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485935-hzq6f"] Jan 23 09:00:04 crc kubenswrapper[5102]: I0123 09:00:04.598004 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:00:04 crc kubenswrapper[5102]: E0123 09:00:04.598393 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:00:05 crc kubenswrapper[5102]: I0123 09:00:05.610619 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5f3576b6-9001-41bf-986c-2389cc0ac574" path="/var/lib/kubelet/pods/5f3576b6-9001-41bf-986c-2389cc0ac574/volumes" Jan 23 09:00:17 crc kubenswrapper[5102]: I0123 09:00:17.843484 5102 scope.go:117] "RemoveContainer" containerID="bc252338db75d6ccff3fc3121fea70650b00195f84e4d6a8b79ed0b7affc72a6" Jan 23 09:00:19 crc kubenswrapper[5102]: I0123 09:00:19.602942 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:00:19 crc kubenswrapper[5102]: E0123 09:00:19.603726 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:00:31 crc kubenswrapper[5102]: I0123 09:00:31.601105 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:00:31 crc kubenswrapper[5102]: E0123 09:00:31.602625 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:00:45 crc kubenswrapper[5102]: I0123 09:00:45.598661 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:00:45 crc kubenswrapper[5102]: E0123 09:00:45.599773 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:00:58 crc kubenswrapper[5102]: I0123 09:00:58.598250 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:00:58 crc kubenswrapper[5102]: E0123 09:00:58.599197 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:01:11 crc kubenswrapper[5102]: I0123 09:01:11.598142 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:01:11 crc kubenswrapper[5102]: E0123 09:01:11.599347 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:01:25 crc kubenswrapper[5102]: I0123 09:01:25.599140 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:01:25 crc kubenswrapper[5102]: E0123 09:01:25.599965 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:01:37 crc kubenswrapper[5102]: I0123 09:01:37.599585 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:01:37 crc kubenswrapper[5102]: E0123 09:01:37.600854 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:01:50 crc kubenswrapper[5102]: I0123 09:01:50.598432 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:01:50 crc kubenswrapper[5102]: E0123 09:01:50.600502 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:02:04 crc kubenswrapper[5102]: I0123 09:02:04.598369 5102 
scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:02:04 crc kubenswrapper[5102]: E0123 09:02:04.598996 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:02:19 crc kubenswrapper[5102]: I0123 09:02:19.604183 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:02:19 crc kubenswrapper[5102]: E0123 09:02:19.604978 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:02:32 crc kubenswrapper[5102]: I0123 09:02:32.598529 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:02:32 crc kubenswrapper[5102]: E0123 09:02:32.599188 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:02:46 crc kubenswrapper[5102]: I0123 09:02:46.598487 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:02:46 crc kubenswrapper[5102]: E0123 09:02:46.599322 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:03:00 crc kubenswrapper[5102]: I0123 09:03:00.597790 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:03:00 crc kubenswrapper[5102]: E0123 09:03:00.598585 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:03:15 crc kubenswrapper[5102]: I0123 09:03:15.599049 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:03:15 crc kubenswrapper[5102]: E0123 09:03:15.599936 5102 
pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:03:27 crc kubenswrapper[5102]: I0123 09:03:27.598402 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:03:29 crc kubenswrapper[5102]: I0123 09:03:29.196491 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9"} Jan 23 09:05:46 crc kubenswrapper[5102]: I0123 09:05:46.768602 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:05:46 crc kubenswrapper[5102]: I0123 09:05:46.769214 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:06:16 crc kubenswrapper[5102]: I0123 09:06:16.768289 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:06:16 crc kubenswrapper[5102]: I0123 09:06:16.768790 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:06:46 crc kubenswrapper[5102]: I0123 09:06:46.768855 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:06:46 crc kubenswrapper[5102]: I0123 09:06:46.769338 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:06:46 crc kubenswrapper[5102]: I0123 09:06:46.769383 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 09:06:46 crc kubenswrapper[5102]: I0123 09:06:46.770161 5102 kuberuntime_manager.go:1027] "Message for Container of pod" 
containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 09:06:46 crc kubenswrapper[5102]: I0123 09:06:46.770213 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9" gracePeriod=600 Jan 23 09:06:47 crc kubenswrapper[5102]: I0123 09:06:47.914133 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9" exitCode=0 Jan 23 09:06:47 crc kubenswrapper[5102]: I0123 09:06:47.914502 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9"} Jan 23 09:06:47 crc kubenswrapper[5102]: I0123 09:06:47.914788 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"} Jan 23 09:06:47 crc kubenswrapper[5102]: I0123 09:06:47.914813 5102 scope.go:117] "RemoveContainer" containerID="fbc347409ceed1253e9c276ae5090820498212b02207be83e8e1f3a2ce54fd5b" Jan 23 09:09:16 crc kubenswrapper[5102]: I0123 09:09:16.768156 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:09:16 crc kubenswrapper[5102]: I0123 09:09:16.768781 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.682170 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:24 crc kubenswrapper[5102]: E0123 09:09:24.683093 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b8f25616-6d28-4af7-afeb-50164627f596" containerName="collect-profiles" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.683112 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="b8f25616-6d28-4af7-afeb-50164627f596" containerName="collect-profiles" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.683279 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="b8f25616-6d28-4af7-afeb-50164627f596" containerName="collect-profiles" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.684577 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.699397 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.820274 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.820332 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.820405 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sgvw6\" (UniqueName: \"kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.921694 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.921752 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.921826 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sgvw6\" (UniqueName: \"kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.922654 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.923689 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:24 crc kubenswrapper[5102]: I0123 09:09:24.961301 5102 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-sgvw6\" (UniqueName: \"kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6\") pod \"redhat-marketplace-8fqld\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:25 crc kubenswrapper[5102]: I0123 09:09:25.005983 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:25 crc kubenswrapper[5102]: I0123 09:09:25.271908 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:26 crc kubenswrapper[5102]: I0123 09:09:26.250350 5102 generic.go:334] "Generic (PLEG): container finished" podID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerID="8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e" exitCode=0 Jan 23 09:09:26 crc kubenswrapper[5102]: I0123 09:09:26.250422 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerDied","Data":"8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e"} Jan 23 09:09:26 crc kubenswrapper[5102]: I0123 09:09:26.250670 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerStarted","Data":"b352d8c9eb071ec9486c484ff08dc7315b85a5ea1ec76d8f95fb2b5f6b489aa5"} Jan 23 09:09:26 crc kubenswrapper[5102]: I0123 09:09:26.252298 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 09:09:27 crc kubenswrapper[5102]: I0123 09:09:27.258988 5102 generic.go:334] "Generic (PLEG): container finished" podID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerID="b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717" exitCode=0 Jan 23 09:09:27 crc kubenswrapper[5102]: I0123 09:09:27.259295 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerDied","Data":"b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717"} Jan 23 09:09:28 crc kubenswrapper[5102]: I0123 09:09:28.270143 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerStarted","Data":"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53"} Jan 23 09:09:28 crc kubenswrapper[5102]: I0123 09:09:28.291006 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-8fqld" podStartSLOduration=2.82027053 podStartE2EDuration="4.290980971s" podCreationTimestamp="2026-01-23 09:09:24 +0000 UTC" firstStartedPulling="2026-01-23 09:09:26.252064867 +0000 UTC m=+8117.072413842" lastFinishedPulling="2026-01-23 09:09:27.722775308 +0000 UTC m=+8118.543124283" observedRunningTime="2026-01-23 09:09:28.28580643 +0000 UTC m=+8119.106155395" watchObservedRunningTime="2026-01-23 09:09:28.290980971 +0000 UTC m=+8119.111329946" Jan 23 09:09:35 crc kubenswrapper[5102]: I0123 09:09:35.006647 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:35 crc kubenswrapper[5102]: I0123 09:09:35.007190 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:35 crc kubenswrapper[5102]: I0123 09:09:35.056120 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:35 crc kubenswrapper[5102]: I0123 09:09:35.364392 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:35 crc kubenswrapper[5102]: I0123 09:09:35.421491 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:37 crc kubenswrapper[5102]: I0123 09:09:37.326356 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-8fqld" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="registry-server" containerID="cri-o://1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53" gracePeriod=2 Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.219093 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.251020 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sgvw6\" (UniqueName: \"kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6\") pod \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.251173 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content\") pod \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.251212 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities\") pod \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\" (UID: \"563fe6e0-ce28-4075-84d2-36d66fb9b6c0\") " Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.253002 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities" (OuterVolumeSpecName: "utilities") pod "563fe6e0-ce28-4075-84d2-36d66fb9b6c0" (UID: "563fe6e0-ce28-4075-84d2-36d66fb9b6c0"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.258929 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6" (OuterVolumeSpecName: "kube-api-access-sgvw6") pod "563fe6e0-ce28-4075-84d2-36d66fb9b6c0" (UID: "563fe6e0-ce28-4075-84d2-36d66fb9b6c0"). InnerVolumeSpecName "kube-api-access-sgvw6". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.289048 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "563fe6e0-ce28-4075-84d2-36d66fb9b6c0" (UID: "563fe6e0-ce28-4075-84d2-36d66fb9b6c0"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.337848 5102 generic.go:334] "Generic (PLEG): container finished" podID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerID="1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53" exitCode=0 Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.337956 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-8fqld" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.338001 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerDied","Data":"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53"} Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.340682 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-8fqld" event={"ID":"563fe6e0-ce28-4075-84d2-36d66fb9b6c0","Type":"ContainerDied","Data":"b352d8c9eb071ec9486c484ff08dc7315b85a5ea1ec76d8f95fb2b5f6b489aa5"} Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.340715 5102 scope.go:117] "RemoveContainer" containerID="1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.355447 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.355509 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.355522 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sgvw6\" (UniqueName: \"kubernetes.io/projected/563fe6e0-ce28-4075-84d2-36d66fb9b6c0-kube-api-access-sgvw6\") on node \"crc\" DevicePath \"\"" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.387444 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.389469 5102 scope.go:117] "RemoveContainer" containerID="b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.403942 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-8fqld"] Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.410060 5102 scope.go:117] "RemoveContainer" containerID="8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.431347 5102 scope.go:117] "RemoveContainer" containerID="1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53" Jan 23 09:09:38 crc kubenswrapper[5102]: E0123 09:09:38.432145 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53\": container with ID starting with 1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53 not found: ID does not exist" containerID="1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.432191 5102 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53"} err="failed to get container status \"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53\": rpc error: code = NotFound desc = could not find container \"1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53\": container with ID starting with 1719f1164539a938f76e88ff0c9ea2f7ee9c949bdbd991104329fa1a9f5feb53 not found: ID does not exist" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.432219 5102 scope.go:117] "RemoveContainer" containerID="b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717" Jan 23 09:09:38 crc kubenswrapper[5102]: E0123 09:09:38.432699 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717\": container with ID starting with b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717 not found: ID does not exist" containerID="b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.432730 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717"} err="failed to get container status \"b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717\": rpc error: code = NotFound desc = could not find container \"b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717\": container with ID starting with b85dfb08fa235955bdf046b928f151495a81b5b3321a7626f76edbae39111717 not found: ID does not exist" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.432752 5102 scope.go:117] "RemoveContainer" containerID="8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e" Jan 23 09:09:38 crc kubenswrapper[5102]: E0123 09:09:38.433201 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e\": container with ID starting with 8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e not found: ID does not exist" containerID="8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e" Jan 23 09:09:38 crc kubenswrapper[5102]: I0123 09:09:38.433231 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e"} err="failed to get container status \"8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e\": rpc error: code = NotFound desc = could not find container \"8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e\": container with ID starting with 8f476a1a1636f0eb42b41f347d161e6fcab01c6cb8222aec58cd6c4e1744512e not found: ID does not exist" Jan 23 09:09:39 crc kubenswrapper[5102]: I0123 09:09:39.609155 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" path="/var/lib/kubelet/pods/563fe6e0-ce28-4075-84d2-36d66fb9b6c0/volumes" Jan 23 09:09:46 crc kubenswrapper[5102]: I0123 09:09:46.768956 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 
127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:09:46 crc kubenswrapper[5102]: I0123 09:09:46.769630 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.936011 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-vg59s"] Jan 23 09:09:50 crc kubenswrapper[5102]: E0123 09:09:50.936647 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="registry-server" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.936660 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="registry-server" Jan 23 09:09:50 crc kubenswrapper[5102]: E0123 09:09:50.936679 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="extract-utilities" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.936685 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="extract-utilities" Jan 23 09:09:50 crc kubenswrapper[5102]: E0123 09:09:50.936695 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="extract-content" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.936702 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="extract-content" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.936880 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="563fe6e0-ce28-4075-84d2-36d66fb9b6c0" containerName="registry-server" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.937937 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:50 crc kubenswrapper[5102]: I0123 09:09:50.948171 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vg59s"] Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.079419 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kxflh\" (UniqueName: \"kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.079471 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.079507 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.180616 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kxflh\" (UniqueName: \"kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.180677 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.180702 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.181176 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.181245 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.201012 5102 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-kxflh\" (UniqueName: \"kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh\") pod \"community-operators-vg59s\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") " pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.265615 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vg59s" Jan 23 09:09:51 crc kubenswrapper[5102]: I0123 09:09:51.805005 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-vg59s"] Jan 23 09:09:52 crc kubenswrapper[5102]: I0123 09:09:52.437304 5102 generic.go:334] "Generic (PLEG): container finished" podID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerID="93bb679fe79fb72c194a15261b177aa6eb130bceb321d42877e7043ad5adff00" exitCode=0 Jan 23 09:09:52 crc kubenswrapper[5102]: I0123 09:09:52.437367 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerDied","Data":"93bb679fe79fb72c194a15261b177aa6eb130bceb321d42877e7043ad5adff00"} Jan 23 09:09:52 crc kubenswrapper[5102]: I0123 09:09:52.437414 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerStarted","Data":"9dfeb2843f70a38d8f805cd710f9c5938f809572f6efb81a7d69514a507dff18"} Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.352052 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-xk4wh"] Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.368848 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xk4wh"] Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.370168 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.413287 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-shjgx\" (UniqueName: \"kubernetes.io/projected/f8e568f4-9641-4e10-9872-d15287b92d47-kube-api-access-shjgx\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.413356 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-catalog-content\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.413408 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-utilities\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.446117 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerStarted","Data":"64caa3478d26da6f9ac952df98d4048b9e8a3705bcd7e959a0ecccccf2bb8197"} Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.514191 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-shjgx\" (UniqueName: \"kubernetes.io/projected/f8e568f4-9641-4e10-9872-d15287b92d47-kube-api-access-shjgx\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.514462 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-catalog-content\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.514568 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-utilities\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.515277 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-catalog-content\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.515527 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/f8e568f4-9641-4e10-9872-d15287b92d47-utilities\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " 
pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.552623 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-shjgx\" (UniqueName: \"kubernetes.io/projected/f8e568f4-9641-4e10-9872-d15287b92d47-kube-api-access-shjgx\") pod \"certified-operators-xk4wh\" (UID: \"f8e568f4-9641-4e10-9872-d15287b92d47\") " pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.555397 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"] Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.557807 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.567486 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"] Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.616165 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.616502 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hckfw\" (UniqueName: \"kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.616614 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.700911 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-xk4wh" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.718089 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.718147 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hckfw\" (UniqueName: \"kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.718175 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.718629 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.718783 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.741901 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hckfw\" (UniqueName: \"kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw\") pod \"redhat-operators-h8d2c\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") " pod="openshift-marketplace/redhat-operators-h8d2c" Jan 23 09:09:53 crc kubenswrapper[5102]: I0123 09:09:53.925865 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.058674 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xk4wh"]
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.289565 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"]
Jan 23 09:09:54 crc kubenswrapper[5102]: W0123 09:09:54.304828 5102 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod98bde6ce_79da_4b97_8174_762c312a60d2.slice/crio-9dfe144da2935613f12832cb8eeec94d1b78ffade147a9184fa26bc464fe372b WatchSource:0}: Error finding container 9dfe144da2935613f12832cb8eeec94d1b78ffade147a9184fa26bc464fe372b: Status 404 returned error can't find the container with id 9dfe144da2935613f12832cb8eeec94d1b78ffade147a9184fa26bc464fe372b
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.456342 5102 generic.go:334] "Generic (PLEG): container finished" podID="f8e568f4-9641-4e10-9872-d15287b92d47" containerID="ccd01eba957ef6523bd4f51daf298577cb563fd4b3347290bb5bc034a6f4fa56" exitCode=0
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.456483 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk4wh" event={"ID":"f8e568f4-9641-4e10-9872-d15287b92d47","Type":"ContainerDied","Data":"ccd01eba957ef6523bd4f51daf298577cb563fd4b3347290bb5bc034a6f4fa56"}
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.456583 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk4wh" event={"ID":"f8e568f4-9641-4e10-9872-d15287b92d47","Type":"ContainerStarted","Data":"021fbedddb89282ad656753bf40d42e345c0786d7bba281892ca3ddaee62f324"}
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.462129 5102 generic.go:334] "Generic (PLEG): container finished" podID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerID="64caa3478d26da6f9ac952df98d4048b9e8a3705bcd7e959a0ecccccf2bb8197" exitCode=0
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.462193 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerDied","Data":"64caa3478d26da6f9ac952df98d4048b9e8a3705bcd7e959a0ecccccf2bb8197"}
Jan 23 09:09:54 crc kubenswrapper[5102]: I0123 09:09:54.464410 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerStarted","Data":"9dfe144da2935613f12832cb8eeec94d1b78ffade147a9184fa26bc464fe372b"}
Jan 23 09:09:55 crc kubenswrapper[5102]: I0123 09:09:55.474637 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerStarted","Data":"a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08"}
Jan 23 09:09:55 crc kubenswrapper[5102]: I0123 09:09:55.477605 5102 generic.go:334] "Generic (PLEG): container finished" podID="98bde6ce-79da-4b97-8174-762c312a60d2" containerID="9ce94f066a3c283ea275bf9eeeb6e9a897ed3a82651cfe1f83dfe9c3f4eb7688" exitCode=0
Jan 23 09:09:55 crc kubenswrapper[5102]: I0123 09:09:55.477661 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerDied","Data":"9ce94f066a3c283ea275bf9eeeb6e9a897ed3a82651cfe1f83dfe9c3f4eb7688"}
Jan 23 09:09:55 crc kubenswrapper[5102]: I0123 09:09:55.500334 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-vg59s" podStartSLOduration=2.707667959 podStartE2EDuration="5.500312939s" podCreationTimestamp="2026-01-23 09:09:50 +0000 UTC" firstStartedPulling="2026-01-23 09:09:52.440103196 +0000 UTC m=+8143.260452171" lastFinishedPulling="2026-01-23 09:09:55.232748176 +0000 UTC m=+8146.053097151" observedRunningTime="2026-01-23 09:09:55.494279301 +0000 UTC m=+8146.314628286" watchObservedRunningTime="2026-01-23 09:09:55.500312939 +0000 UTC m=+8146.320661914"
Jan 23 09:09:56 crc kubenswrapper[5102]: I0123 09:09:56.499471 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerStarted","Data":"c61a7f5f87f17b570b78277ecbf9a0bff20a16b24d939a8166212c3dbfee65f2"}
Jan 23 09:09:57 crc kubenswrapper[5102]: I0123 09:09:57.511738 5102 generic.go:334] "Generic (PLEG): container finished" podID="98bde6ce-79da-4b97-8174-762c312a60d2" containerID="c61a7f5f87f17b570b78277ecbf9a0bff20a16b24d939a8166212c3dbfee65f2" exitCode=0
Jan 23 09:09:57 crc kubenswrapper[5102]: I0123 09:09:57.511803 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerDied","Data":"c61a7f5f87f17b570b78277ecbf9a0bff20a16b24d939a8166212c3dbfee65f2"}
Jan 23 09:09:59 crc kubenswrapper[5102]: I0123 09:09:59.530653 5102 generic.go:334] "Generic (PLEG): container finished" podID="f8e568f4-9641-4e10-9872-d15287b92d47" containerID="23141ebc759266cdcda599220a60930a4f6ac44f622d537c2f78b72b4d96e700" exitCode=0
Jan 23 09:09:59 crc kubenswrapper[5102]: I0123 09:09:59.530767 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk4wh" event={"ID":"f8e568f4-9641-4e10-9872-d15287b92d47","Type":"ContainerDied","Data":"23141ebc759266cdcda599220a60930a4f6ac44f622d537c2f78b72b4d96e700"}
Jan 23 09:09:59 crc kubenswrapper[5102]: I0123 09:09:59.537117 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerStarted","Data":"c414d4ce9207eac43f27e04c4aacd14d0a8d002b3057dcda2ff0e322f3a566f1"}
Jan 23 09:09:59 crc kubenswrapper[5102]: I0123 09:09:59.579399 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-h8d2c" podStartSLOduration=3.093063074 podStartE2EDuration="6.579377884s" podCreationTimestamp="2026-01-23 09:09:53 +0000 UTC" firstStartedPulling="2026-01-23 09:09:55.47944821 +0000 UTC m=+8146.299797185" lastFinishedPulling="2026-01-23 09:09:58.96576302 +0000 UTC m=+8149.786111995" observedRunningTime="2026-01-23 09:09:59.574448141 +0000 UTC m=+8150.394797116" watchObservedRunningTime="2026-01-23 09:09:59.579377884 +0000 UTC m=+8150.399726859"
Jan 23 09:10:00 crc kubenswrapper[5102]: I0123 09:10:00.546591 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-xk4wh" event={"ID":"f8e568f4-9641-4e10-9872-d15287b92d47","Type":"ContainerStarted","Data":"c55d0147046fcda50f233c39cdccd11c9002f43fec3fba45c1b20b9ba8d39663"}
Jan 23 09:10:00 crc kubenswrapper[5102]: I0123 09:10:00.571404 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-xk4wh" podStartSLOduration=2.062889918 podStartE2EDuration="7.571381983s" podCreationTimestamp="2026-01-23 09:09:53 +0000 UTC" firstStartedPulling="2026-01-23 09:09:54.458577774 +0000 UTC m=+8145.278926749" lastFinishedPulling="2026-01-23 09:09:59.967069829 +0000 UTC m=+8150.787418814" observedRunningTime="2026-01-23 09:10:00.566764019 +0000 UTC m=+8151.387112994" watchObservedRunningTime="2026-01-23 09:10:00.571381983 +0000 UTC m=+8151.391730978"
Jan 23 09:10:01 crc kubenswrapper[5102]: I0123 09:10:01.266030 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:01 crc kubenswrapper[5102]: I0123 09:10:01.266120 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:01 crc kubenswrapper[5102]: I0123 09:10:01.314353 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:01 crc kubenswrapper[5102]: I0123 09:10:01.592990 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:03 crc kubenswrapper[5102]: I0123 09:10:03.701635 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-xk4wh"
Jan 23 09:10:03 crc kubenswrapper[5102]: I0123 09:10:03.701707 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-xk4wh"
Jan 23 09:10:03 crc kubenswrapper[5102]: I0123 09:10:03.743508 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-xk4wh"
Jan 23 09:10:03 crc kubenswrapper[5102]: I0123 09:10:03.927527 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:03 crc kubenswrapper[5102]: I0123 09:10:03.927600 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:04 crc kubenswrapper[5102]: I0123 09:10:04.726182 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vg59s"]
Jan 23 09:10:04 crc kubenswrapper[5102]: I0123 09:10:04.726473 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-vg59s" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="registry-server" containerID="cri-o://a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08" gracePeriod=2
Jan 23 09:10:04 crc kubenswrapper[5102]: I0123 09:10:04.979236 5102 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-h8d2c" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="registry-server" probeResult="failure" output=<
Jan 23 09:10:04 crc kubenswrapper[5102]: timeout: failed to connect service ":50051" within 1s
Jan 23 09:10:04 crc kubenswrapper[5102]: >
Jan 23 09:10:11 crc kubenswrapper[5102]: E0123 09:10:11.266637 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08 is running failed: container process not found" containerID="a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:11 crc kubenswrapper[5102]: E0123 09:10:11.268306 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08 is running failed: container process not found" containerID="a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:11 crc kubenswrapper[5102]: E0123 09:10:11.268651 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08 is running failed: container process not found" containerID="a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:11 crc kubenswrapper[5102]: E0123 09:10:11.268748 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/community-operators-vg59s" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="registry-server"
Jan 23 09:10:12 crc kubenswrapper[5102]: I0123 09:10:12.454467 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vg59s_904335e9-d2d8-4648-bf8e-8ef0c192b8f7/registry-server/0.log"
Jan 23 09:10:12 crc kubenswrapper[5102]: I0123 09:10:12.456323 5102 generic.go:334] "Generic (PLEG): container finished" podID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerID="a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08" exitCode=137
Jan 23 09:10:12 crc kubenswrapper[5102]: I0123 09:10:12.456392 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerDied","Data":"a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08"}
Jan 23 09:10:13 crc kubenswrapper[5102]: I0123 09:10:13.757779 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-xk4wh"
Jan 23 09:10:13 crc kubenswrapper[5102]: I0123 09:10:13.970218 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:14 crc kubenswrapper[5102]: I0123 09:10:14.015718 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:14 crc kubenswrapper[5102]: I0123 09:10:14.996954 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"]
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.250275 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-xk4wh"]
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.477173 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-h8d2c" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="registry-server" containerID="cri-o://c414d4ce9207eac43f27e04c4aacd14d0a8d002b3057dcda2ff0e322f3a566f1" gracePeriod=2
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.553489 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vg59s_904335e9-d2d8-4648-bf8e-8ef0c192b8f7/registry-server/0.log"
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.554480 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.596682 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sqprz"]
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.597107 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sqprz" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="registry-server" containerID="cri-o://714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6" gracePeriod=2
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.730284 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content\") pod \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") "
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.730654 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kxflh\" (UniqueName: \"kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh\") pod \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") "
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.730760 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities\") pod \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\" (UID: \"904335e9-d2d8-4648-bf8e-8ef0c192b8f7\") "
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.731555 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities" (OuterVolumeSpecName: "utilities") pod "904335e9-d2d8-4648-bf8e-8ef0c192b8f7" (UID: "904335e9-d2d8-4648-bf8e-8ef0c192b8f7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.735920 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh" (OuterVolumeSpecName: "kube-api-access-kxflh") pod "904335e9-d2d8-4648-bf8e-8ef0c192b8f7" (UID: "904335e9-d2d8-4648-bf8e-8ef0c192b8f7"). InnerVolumeSpecName "kube-api-access-kxflh". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.787868 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "904335e9-d2d8-4648-bf8e-8ef0c192b8f7" (UID: "904335e9-d2d8-4648-bf8e-8ef0c192b8f7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.835413 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.835450 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:15 crc kubenswrapper[5102]: I0123 09:10:15.835462 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kxflh\" (UniqueName: \"kubernetes.io/projected/904335e9-d2d8-4648-bf8e-8ef0c192b8f7-kube-api-access-kxflh\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.485406 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-vg59s_904335e9-d2d8-4648-bf8e-8ef0c192b8f7/registry-server/0.log"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.487118 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-vg59s" event={"ID":"904335e9-d2d8-4648-bf8e-8ef0c192b8f7","Type":"ContainerDied","Data":"9dfeb2843f70a38d8f805cd710f9c5938f809572f6efb81a7d69514a507dff18"}
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.487178 5102 scope.go:117] "RemoveContainer" containerID="a58933e846ec93f94199e3e97cb2eb67b86bb88b0b980b05f9cd53c8a1ea7d08"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.487359 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-vg59s"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.505444 5102 scope.go:117] "RemoveContainer" containerID="64caa3478d26da6f9ac952df98d4048b9e8a3705bcd7e959a0ecccccf2bb8197"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.527889 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-vg59s"]
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.532912 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-vg59s"]
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.536655 5102 scope.go:117] "RemoveContainer" containerID="93bb679fe79fb72c194a15261b177aa6eb130bceb321d42877e7043ad5adff00"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.768214 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body=
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.768300 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.768351 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.769004 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted"
Jan 23 09:10:16 crc kubenswrapper[5102]: I0123 09:10:16.769066 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066" gracePeriod=600
Jan 23 09:10:17 crc kubenswrapper[5102]: I0123 09:10:17.605991 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" path="/var/lib/kubelet/pods/904335e9-d2d8-4648-bf8e-8ef0c192b8f7/volumes"
Jan 23 09:10:18 crc kubenswrapper[5102]: E0123 09:10:18.378066 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6 is running failed: container process not found" containerID="714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:18 crc kubenswrapper[5102]: E0123 09:10:18.378579 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6 is running failed: container process not found" containerID="714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:18 crc kubenswrapper[5102]: E0123 09:10:18.378954 5102 log.go:32] "ExecSync cmd from runtime service failed" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6 is running failed: container process not found" containerID="714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6" cmd=["grpc_health_probe","-addr=:50051"]
Jan 23 09:10:18 crc kubenswrapper[5102]: E0123 09:10:18.378989 5102 prober.go:104] "Probe errored" err="rpc error: code = NotFound desc = container is not created or running: checking if PID of 714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6 is running failed: container process not found" probeType="Readiness" pod="openshift-marketplace/certified-operators-sqprz" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="registry-server"
Jan 23 09:10:18 crc kubenswrapper[5102]: I0123 09:10:18.507600 5102 generic.go:334] "Generic (PLEG): container finished" podID="98bde6ce-79da-4b97-8174-762c312a60d2" containerID="c414d4ce9207eac43f27e04c4aacd14d0a8d002b3057dcda2ff0e322f3a566f1" exitCode=0
Jan 23 09:10:18 crc kubenswrapper[5102]: I0123 09:10:18.507653 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerDied","Data":"c414d4ce9207eac43f27e04c4aacd14d0a8d002b3057dcda2ff0e322f3a566f1"}
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.525441 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066" exitCode=0
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.525520 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"}
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.526059 5102 scope.go:117] "RemoveContainer" containerID="af96fc80678d418fded2f3c9b09bc8d8708114510615c34caed1bc4c2b18c0f9"
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.539993 5102 generic.go:334] "Generic (PLEG): container finished" podID="2bce5a3d-171e-4219-87d8-827d2101389a" containerID="714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6" exitCode=0
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.540054 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerDied","Data":"714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6"}
Jan 23 09:10:19 crc kubenswrapper[5102]: E0123 09:10:19.615208 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.704681 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.799000 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities\") pod \"98bde6ce-79da-4b97-8174-762c312a60d2\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") "
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.799173 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hckfw\" (UniqueName: \"kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw\") pod \"98bde6ce-79da-4b97-8174-762c312a60d2\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") "
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.799275 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content\") pod \"98bde6ce-79da-4b97-8174-762c312a60d2\" (UID: \"98bde6ce-79da-4b97-8174-762c312a60d2\") "
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.800639 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities" (OuterVolumeSpecName: "utilities") pod "98bde6ce-79da-4b97-8174-762c312a60d2" (UID: "98bde6ce-79da-4b97-8174-762c312a60d2"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.806829 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw" (OuterVolumeSpecName: "kube-api-access-hckfw") pod "98bde6ce-79da-4b97-8174-762c312a60d2" (UID: "98bde6ce-79da-4b97-8174-762c312a60d2"). InnerVolumeSpecName "kube-api-access-hckfw". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.901492 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.901552 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hckfw\" (UniqueName: \"kubernetes.io/projected/98bde6ce-79da-4b97-8174-762c312a60d2-kube-api-access-hckfw\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:19 crc kubenswrapper[5102]: I0123 09:10:19.940554 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "98bde6ce-79da-4b97-8174-762c312a60d2" (UID: "98bde6ce-79da-4b97-8174-762c312a60d2"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.003731 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/98bde6ce-79da-4b97-8174-762c312a60d2-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.025507 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sqprz"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.105162 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-54crz\" (UniqueName: \"kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz\") pod \"2bce5a3d-171e-4219-87d8-827d2101389a\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") "
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.105389 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content\") pod \"2bce5a3d-171e-4219-87d8-827d2101389a\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") "
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.105455 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities\") pod \"2bce5a3d-171e-4219-87d8-827d2101389a\" (UID: \"2bce5a3d-171e-4219-87d8-827d2101389a\") "
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.107332 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities" (OuterVolumeSpecName: "utilities") pod "2bce5a3d-171e-4219-87d8-827d2101389a" (UID: "2bce5a3d-171e-4219-87d8-827d2101389a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.110689 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz" (OuterVolumeSpecName: "kube-api-access-54crz") pod "2bce5a3d-171e-4219-87d8-827d2101389a" (UID: "2bce5a3d-171e-4219-87d8-827d2101389a"). InnerVolumeSpecName "kube-api-access-54crz". PluginName "kubernetes.io/projected", VolumeGidValue ""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.169294 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "2bce5a3d-171e-4219-87d8-827d2101389a" (UID: "2bce5a3d-171e-4219-87d8-827d2101389a"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue ""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.207457 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-catalog-content\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.207491 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/2bce5a3d-171e-4219-87d8-827d2101389a-utilities\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.207503 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-54crz\" (UniqueName: \"kubernetes.io/projected/2bce5a3d-171e-4219-87d8-827d2101389a-kube-api-access-54crz\") on node \"crc\" DevicePath \"\""
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.550598 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:10:20 crc kubenswrapper[5102]: E0123 09:10:20.551052 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.552706 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-h8d2c" event={"ID":"98bde6ce-79da-4b97-8174-762c312a60d2","Type":"ContainerDied","Data":"9dfe144da2935613f12832cb8eeec94d1b78ffade147a9184fa26bc464fe372b"}
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.552780 5102 scope.go:117] "RemoveContainer" containerID="c414d4ce9207eac43f27e04c4aacd14d0a8d002b3057dcda2ff0e322f3a566f1"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.552699 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-h8d2c"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.556412 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sqprz" event={"ID":"2bce5a3d-171e-4219-87d8-827d2101389a","Type":"ContainerDied","Data":"ecab60b4b8a1cfd596384b3abe2f1c84cdb87e4ccbcd9e3388c74c9fab229b1e"}
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.556514 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sqprz"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.584075 5102 scope.go:117] "RemoveContainer" containerID="c61a7f5f87f17b570b78277ecbf9a0bff20a16b24d939a8166212c3dbfee65f2"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.614471 5102 scope.go:117] "RemoveContainer" containerID="9ce94f066a3c283ea275bf9eeeb6e9a897ed3a82651cfe1f83dfe9c3f4eb7688"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.624817 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sqprz"]
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.665903 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sqprz"]
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.667218 5102 scope.go:117] "RemoveContainer" containerID="714f5e35e3f5e7ca9142f3cb0bf1ee824918d346ba62ecac3646dbffa7e017c6"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.676043 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"]
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.686270 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-h8d2c"]
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.690990 5102 scope.go:117] "RemoveContainer" containerID="907f8e8469d7a7e48fa966065328c9745519be5d120e9aa3b3ef6d5474c7d8e2"
Jan 23 09:10:20 crc kubenswrapper[5102]: I0123 09:10:20.726590 5102 scope.go:117] "RemoveContainer" containerID="e1e7c8880806729317b61cae37fea5fca60938dccc389f9310c328bc5a645716"
Jan 23 09:10:21 crc kubenswrapper[5102]: I0123 09:10:21.608824 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" path="/var/lib/kubelet/pods/2bce5a3d-171e-4219-87d8-827d2101389a/volumes"
Jan 23 09:10:21 crc kubenswrapper[5102]: I0123 09:10:21.610284 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" path="/var/lib/kubelet/pods/98bde6ce-79da-4b97-8174-762c312a60d2/volumes"
Jan 23 09:10:33 crc kubenswrapper[5102]: I0123 09:10:33.598224 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:10:33 crc kubenswrapper[5102]: E0123 09:10:33.598907 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:10:44 crc kubenswrapper[5102]: I0123 09:10:44.598766 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:10:44 crc kubenswrapper[5102]: E0123 09:10:44.599985 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:10:58 crc kubenswrapper[5102]: I0123 09:10:58.598030 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:10:58 crc kubenswrapper[5102]: E0123 09:10:58.598829 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:11:10 crc kubenswrapper[5102]: I0123 09:11:10.598202 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:11:10 crc kubenswrapper[5102]: E0123 09:11:10.599002 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:11:21 crc kubenswrapper[5102]: I0123 09:11:21.598152 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:11:21 crc kubenswrapper[5102]: E0123 09:11:21.599044 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:11:35 crc kubenswrapper[5102]: I0123 09:11:35.598557 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:11:35 crc kubenswrapper[5102]: E0123 09:11:35.599269 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:11:50 crc kubenswrapper[5102]: I0123 09:11:50.598989 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:11:50 crc kubenswrapper[5102]: E0123 09:11:50.600271 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:12:03 crc kubenswrapper[5102]: I0123 09:12:03.598770 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:12:03 crc kubenswrapper[5102]: E0123 09:12:03.599608 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:12:18 crc kubenswrapper[5102]: I0123 09:12:18.599281 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:12:18 crc kubenswrapper[5102]: E0123 09:12:18.600199 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:12:29 crc kubenswrapper[5102]: I0123 09:12:29.604897 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:12:29 crc kubenswrapper[5102]: E0123 09:12:29.606089 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.367345 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-vf7m2/must-gather-xqm8x"]
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368088 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368100 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368124 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368130 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368141 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368148 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368174 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368180 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368187 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368193 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="extract-content"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368203 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368208 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368218 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368223 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368233 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368240 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: E0123 09:12:35.368254 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368260 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="extract-utilities"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368385 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="2bce5a3d-171e-4219-87d8-827d2101389a" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368401 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="904335e9-d2d8-4648-bf8e-8ef0c192b8f7" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.368409 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="98bde6ce-79da-4b97-8174-762c312a60d2" containerName="registry-server"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.369112 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.371693 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-must-gather-vf7m2"/"default-dockercfg-tgzf9"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.371989 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vf7m2"/"openshift-service-ca.crt"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.388636 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-vf7m2"/"kube-root-ca.crt"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.397041 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vf7m2/must-gather-xqm8x"]
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.535710 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.535769 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x5jh8\" (UniqueName: \"kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.638201 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.638278 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x5jh8\" (UniqueName: \"kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.638795 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.660232 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x5jh8\" (UniqueName: \"kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8\") pod \"must-gather-xqm8x\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.716495 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-vf7m2/must-gather-xqm8x"
Jan 23 09:12:35 crc kubenswrapper[5102]: I0123 09:12:35.949419 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-vf7m2/must-gather-xqm8x"]
Jan 23 09:12:36 crc kubenswrapper[5102]: I0123 09:12:36.498103 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" event={"ID":"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8","Type":"ContainerStarted","Data":"79f91ae75367293c09c2ac3d53377b79def69bd98b57cd2a840cb19e41b23c0e"}
Jan 23 09:12:43 crc kubenswrapper[5102]: I0123 09:12:43.597793 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:12:43 crc kubenswrapper[5102]: E0123 09:12:43.598626 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:12:44 crc kubenswrapper[5102]: I0123 09:12:44.567860 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" event={"ID":"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8","Type":"ContainerStarted","Data":"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2"}
Jan 23 09:12:45 crc kubenswrapper[5102]: I0123 09:12:45.575860 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" event={"ID":"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8","Type":"ContainerStarted","Data":"5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f"}
Jan 23 09:12:45 crc kubenswrapper[5102]: I0123 09:12:45.592661 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" podStartSLOduration=2.381300532 podStartE2EDuration="10.592645884s" podCreationTimestamp="2026-01-23 09:12:35 +0000 UTC" firstStartedPulling="2026-01-23 09:12:35.964811715 +0000 UTC m=+8306.785160690" lastFinishedPulling="2026-01-23 09:12:44.176157067 +0000 UTC m=+8314.996506042" observedRunningTime="2026-01-23 09:12:45.59057599 +0000 UTC m=+8316.410924965" watchObservedRunningTime="2026-01-23 09:12:45.592645884 +0000 UTC m=+8316.412994859"
Jan 23 09:12:56 crc kubenswrapper[5102]: I0123 09:12:56.598264 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:12:56 crc kubenswrapper[5102]: E0123 09:12:56.599015 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:13:10 crc kubenswrapper[5102]: I0123 09:13:10.598489 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:13:10 crc kubenswrapper[5102]: E0123 09:13:10.599230 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:13:22 crc kubenswrapper[5102]: I0123 09:13:22.598175 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:13:22 crc kubenswrapper[5102]: E0123 09:13:22.599017 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:13:36 crc kubenswrapper[5102]: I0123 09:13:36.598011 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:13:36 crc kubenswrapper[5102]: E0123 09:13:36.598785 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:13:46 crc kubenswrapper[5102]: I0123 09:13:46.621962 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/util/0.log"
Jan 23 09:13:46 crc kubenswrapper[5102]: I0123 09:13:46.787128 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/util/0.log"
Jan 23 09:13:46 crc kubenswrapper[5102]: I0123 09:13:46.826154 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/pull/0.log"
Jan 23 09:13:46 crc kubenswrapper[5102]: I0123 09:13:46.886949 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/pull/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.079483 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/extract/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.191439 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/pull/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.204391 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_97bb1f8024535e829fa8894f35597f6754858047b9fee802213b02de86btm2g_d61d5841-724a-4fdb-8051-4625df4a1f1d/util/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.389197 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_barbican-operator-controller-manager-59dd8b7cbf-272zl_1ae0c44b-b391-4df4-8246-b0e24f649e8b/manager/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.431631 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_cinder-operator-controller-manager-69cf5d4557-wb6dk_73336c80-3616-4716-9ecf-cfe3f2114c4a/manager/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.667447 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_designate-operator-controller-manager-b45d7bf98-mth4h_eab33781-ceb7-4c8b-8df9-55ca5ab33f17/manager/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.731522 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_glance-operator-controller-manager-78fdd796fd-n79vx_52d07fd1-692c-461a-baf4-51d4af679796/manager/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.908126 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_heat-operator-controller-manager-594c8c9d5d-mjxxp_9c38e15e-2ddd-473c-892a-59aa9978e12c/manager/0.log"
Jan 23 09:13:47 crc kubenswrapper[5102]: I0123 09:13:47.952373 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_horizon-operator-controller-manager-77d5c5b54f-2cmr7_d31fc107-8403-4c08-9058-483dafc58c60/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.116832 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ironic-operator-controller-manager-69d6c9f5b8-wcdgn_51b93e08-8c79-4ccc-b4d4-c5d54e095284/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.310472 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_infra-operator-controller-manager-54ccf4f85d-lwblf_8661bd2c-86be-46fd-95d7-df60f1736855/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.413818 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_keystone-operator-controller-manager-b8b6d4659-52bz9_9a8349c3-c1d1-4549-9a7f-67755c04328f/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.504847 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_manila-operator-controller-manager-78c6999f6f-rs5gj_80047622-db1e-4345-b2aa-e44f716fe6ad/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.598681 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:13:48 crc kubenswrapper[5102]: E0123 09:13:48.599102 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.647812 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_mariadb-operator-controller-manager-c87fff755-j6bsb_529e16f1-1e4b-4ba2-8855-e8a445d0c63f/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.772170 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_neutron-operator-controller-manager-5d8f59fb49-b78cx_40fc8f9a-303c-4264-ac77-448100591967/manager/0.log"
Jan 23 09:13:48 crc kubenswrapper[5102]: I0123 09:13:48.943055 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_nova-operator-controller-manager-6b8bc8d87d-p4czx_750c537e-8dea-47b4-883c-c30a44e5f48c/manager/0.log"
Jan 23 09:13:49 crc kubenswrapper[5102]: I0123 09:13:49.010605 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_octavia-operator-controller-manager-7bd9774b6-nwpwk_4027b29c-39eb-4b48-b17d-64c6587dc3fb/manager/0.log"
Jan 23 09:13:49 crc kubenswrapper[5102]: I0123 09:13:49.295829 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-baremetal-operator-controller-manager-7c9c58b557fl4kc_a22f5f2d-ed47-4190-84a2-5243a6479598/manager/0.log"
Jan 23 09:13:49 crc kubenswrapper[5102]: I0123 09:13:49.506654 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-init-85bfd44c94-wv42b_1b68a251-dbfe-4765-9b64-2bfa66e02d96/operator/0.log"
Jan 23 09:13:49 crc kubenswrapper[5102]: I0123 09:13:49.850885 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-index-svxnq_faa339ad-749a-4eca-a33b-65d14522c3c1/registry-server/0.log"
Jan 23 09:13:49 crc kubenswrapper[5102]: I0123 09:13:49.917585 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_ovn-operator-controller-manager-55db956ddc-bmq5z_8d146faa-8342-4adc-8e6d-37018df6873f/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.178117 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_placement-operator-controller-manager-5d646b7d76-j6qxz_ff029305-9cf1-451a-b5b8-ff55bfc14dd3/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.287636 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_rabbitmq-cluster-operator-manager-668c99d594-cdzmd_937d6dd8-25fe-4346-80f5-345f3f772ed9/operator/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.324917 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_openstack-operator-controller-manager-57c46955cf-k49t5_0f5bd4e4-b7d3-45ff-9efb-e2b55f546039/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.425248 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_swift-operator-controller-manager-547cbdb99f-6js2g_9f0624d9-4655-424e-bcc9-2e445bb833c7/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.549734 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_telemetry-operator-controller-manager-85cd9769bb-7nxxd_d9c398eb-293d-47e1-9f0d-2ce33fd8878f/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.720616 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_test-operator-controller-manager-69797bbcbd-jhfvr_fb2a62ae-ed6a-4595-8cbc-0fb0b93087a8/manager/0.log"
Jan 23 09:13:50 crc kubenswrapper[5102]: I0123 09:13:50.799480 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openstack-operators_watcher-operator-controller-manager-5ffb9c6597-hxkkl_f5a445df-fe4d-4323-993e-7d9f20cdd29c/manager/0.log"
Jan 23 09:14:01 crc kubenswrapper[5102]: I0123 09:14:01.598735 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:14:01 crc kubenswrapper[5102]: E0123 09:14:01.599428 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:14:10 crc kubenswrapper[5102]: I0123 09:14:10.480603 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-b59qh_49c4be94-985a-4c36-bb76-9dc6cdb0da17/control-plane-machine-set-operator/0.log"
Jan 23 09:14:10 crc kubenswrapper[5102]: I0123 09:14:10.710091 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5q97t_7cf0a44b-e9bc-42da-8883-eb6c9a58f37e/kube-rbac-proxy/0.log"
Jan 23 09:14:10 crc kubenswrapper[5102]: I0123 09:14:10.716810 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-5q97t_7cf0a44b-e9bc-42da-8883-eb6c9a58f37e/machine-api-operator/0.log"
Jan 23 09:14:16 crc kubenswrapper[5102]: I0123 09:14:16.598013 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:14:16 crc kubenswrapper[5102]: E0123 09:14:16.598813 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:14:23 crc kubenswrapper[5102]: I0123 09:14:23.598470 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-n44p9_16bbdaaa-0566-4514-bfa1-186ffe09607f/cert-manager-controller/0.log"
Jan 23 09:14:23 crc kubenswrapper[5102]: I0123 09:14:23.817226 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-4fll4_fc523104-1eac-4a3d-bb14-e21cc2a63e10/cert-manager-cainjector/0.log"
Jan 23 09:14:23 crc kubenswrapper[5102]: I0123 09:14:23.839423 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-cspwp_010c2bf3-7da5-419e-8889-f81a5f7d8bfe/cert-manager-webhook/0.log"
Jan 23 09:14:30 crc kubenswrapper[5102]: I0123 09:14:30.597880 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:14:30 crc kubenswrapper[5102]: E0123 09:14:30.598515 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:14:36 crc kubenswrapper[5102]: I0123 09:14:36.533655 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-console-plugin-7754f76f8b-v7xk7_0591e1d6-98eb-4fbe-b102-d420dfb1dd4a/nmstate-console-plugin/0.log"
Jan 23 09:14:36 crc kubenswrapper[5102]: I0123 09:14:36.677458 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-handler-zj4xz_d3b2043c-f890-4def-9d04-857501627d4d/nmstate-handler/0.log"
Jan 23 09:14:36 crc kubenswrapper[5102]: I0123 09:14:36.711323 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-5zbpg_b517d1cc-d6a8-4857-a414-25efdbfc523f/kube-rbac-proxy/0.log"
Jan 23 09:14:36 crc kubenswrapper[5102]: I0123 09:14:36.767050 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-metrics-54757c584b-5zbpg_b517d1cc-d6a8-4857-a414-25efdbfc523f/nmstate-metrics/0.log"
Jan 23 09:14:36 crc kubenswrapper[5102]: I0123 09:14:36.878591 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-operator-646758c888-9pqnm_cfc974f5-adaa-43f2-bd3f-d0cf669315f2/nmstate-operator/0.log"
Jan 23 09:14:37 crc kubenswrapper[5102]: I0123 09:14:37.071492 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-nmstate_nmstate-webhook-8474b5b9d8-fhrhh_26613359-769b-4d8c-846b-aafc773eec15/nmstate-webhook/0.log"
Jan 23 09:14:43 crc kubenswrapper[5102]: I0123 09:14:43.597996 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:14:43 crc kubenswrapper[5102]: E0123 09:14:43.598559 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:14:55 crc kubenswrapper[5102]: I0123 09:14:55.598374 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066"
Jan 23 09:14:55 crc kubenswrapper[5102]: E0123 09:14:55.599103 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e"
Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.144315 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv"]
Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.145830 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.149982 5102 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.151798 5102 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.154526 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv"] Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.334132 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-62b8p\" (UniqueName: \"kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.334322 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.334372 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.435286 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.435340 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.435388 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-62b8p\" (UniqueName: \"kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.436372 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume\") pod 
\"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.441580 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.458298 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-62b8p\" (UniqueName: \"kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p\") pod \"collect-profiles-29485995-ctlwv\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.466090 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:00 crc kubenswrapper[5102]: I0123 09:15:00.902601 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv"] Jan 23 09:15:01 crc kubenswrapper[5102]: I0123 09:15:01.631809 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" event={"ID":"0563993f-d6fa-4bf4-996c-13c2abdc84e2","Type":"ContainerStarted","Data":"11bdc462b89552a821beb82333ffea8c480374578109d8861bcd4b29ea1990bf"} Jan 23 09:15:01 crc kubenswrapper[5102]: I0123 09:15:01.632051 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" event={"ID":"0563993f-d6fa-4bf4-996c-13c2abdc84e2","Type":"ContainerStarted","Data":"844592c8afd3de60e7cb440bb49d2914bf0265606bd4ba22a423d213f73d80c1"} Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.639910 5102 generic.go:334] "Generic (PLEG): container finished" podID="0563993f-d6fa-4bf4-996c-13c2abdc84e2" containerID="11bdc462b89552a821beb82333ffea8c480374578109d8861bcd4b29ea1990bf" exitCode=0 Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.639966 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" event={"ID":"0563993f-d6fa-4bf4-996c-13c2abdc84e2","Type":"ContainerDied","Data":"11bdc462b89552a821beb82333ffea8c480374578109d8861bcd4b29ea1990bf"} Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.915804 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.972829 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume\") pod \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.972942 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-62b8p\" (UniqueName: \"kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p\") pod \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.972980 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume\") pod \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\" (UID: \"0563993f-d6fa-4bf4-996c-13c2abdc84e2\") " Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.973873 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume" (OuterVolumeSpecName: "config-volume") pod "0563993f-d6fa-4bf4-996c-13c2abdc84e2" (UID: "0563993f-d6fa-4bf4-996c-13c2abdc84e2"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.978366 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "0563993f-d6fa-4bf4-996c-13c2abdc84e2" (UID: "0563993f-d6fa-4bf4-996c-13c2abdc84e2"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Jan 23 09:15:02 crc kubenswrapper[5102]: I0123 09:15:02.994253 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p" (OuterVolumeSpecName: "kube-api-access-62b8p") pod "0563993f-d6fa-4bf4-996c-13c2abdc84e2" (UID: "0563993f-d6fa-4bf4-996c-13c2abdc84e2"). InnerVolumeSpecName "kube-api-access-62b8p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.075743 5102 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/0563993f-d6fa-4bf4-996c-13c2abdc84e2-secret-volume\") on node \"crc\" DevicePath \"\"" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.075797 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-62b8p\" (UniqueName: \"kubernetes.io/projected/0563993f-d6fa-4bf4-996c-13c2abdc84e2-kube-api-access-62b8p\") on node \"crc\" DevicePath \"\"" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.075812 5102 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/0563993f-d6fa-4bf4-996c-13c2abdc84e2-config-volume\") on node \"crc\" DevicePath \"\"" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.649401 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" event={"ID":"0563993f-d6fa-4bf4-996c-13c2abdc84e2","Type":"ContainerDied","Data":"844592c8afd3de60e7cb440bb49d2914bf0265606bd4ba22a423d213f73d80c1"} Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.649452 5102 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="844592c8afd3de60e7cb440bb49d2914bf0265606bd4ba22a423d213f73d80c1" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.649561 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29485995-ctlwv" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.858762 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-6pgbp_3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95/kube-rbac-proxy/0.log" Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.991934 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt"] Jan 23 09:15:03 crc kubenswrapper[5102]: I0123 09:15:03.996315 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29485950-zgdvt"] Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.102442 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-frr-files/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.264990 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-frr-files/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.300117 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-reloader/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.347576 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-metrics/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.362988 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_controller-6968d8fdc4-6pgbp_3d3645bb-e0d6-42ff-a0b8-e02ebfff5b95/controller/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.469331 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-reloader/0.log" Jan 
23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.636365 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-metrics/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.646758 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-reloader/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.656204 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-frr-files/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.688078 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-metrics/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.850243 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-frr-files/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.850580 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-reloader/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.885303 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/cp-metrics/0.log" Jan 23 09:15:04 crc kubenswrapper[5102]: I0123 09:15:04.888738 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/controller/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.055210 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/frr-metrics/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.060953 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/kube-rbac-proxy/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.144169 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/kube-rbac-proxy-frr/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.286415 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/reloader/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.338719 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-webhook-server-7df86c4f6c-txjhb_b931d587-70ae-4f83-bc54-507647f32f2a/frr-k8s-webhook-server/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.571959 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-controller-manager-6f968486d9-tlfn6_d8a876cd-3c97-46c3-9633-62bb2f06664a/manager/0.log" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.610265 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0f83b99a-e856-40f7-a283-34567ff41f3a" path="/var/lib/kubelet/pods/0f83b99a-e856-40f7-a283-34567ff41f3a/volumes" Jan 23 09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.681038 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_metallb-operator-webhook-server-5557795bdd-9p5gg_6452fc17-0e56-40dc-a2cc-5637175b0b81/webhook-server/0.log" Jan 23 
09:15:05 crc kubenswrapper[5102]: I0123 09:15:05.738119 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-44g7f_1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0/kube-rbac-proxy/0.log" Jan 23 09:15:06 crc kubenswrapper[5102]: I0123 09:15:06.434328 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_speaker-44g7f_1b71d8b4-dfe1-4e4a-9cc7-9742bf49c0f0/speaker/0.log" Jan 23 09:15:06 crc kubenswrapper[5102]: I0123 09:15:06.597677 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066" Jan 23 09:15:06 crc kubenswrapper[5102]: E0123 09:15:06.597996 5102 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-vnmgh_openshift-machine-config-operator(04f943d6-91c5-4493-b310-de0b8ef7966e)\"" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" Jan 23 09:15:06 crc kubenswrapper[5102]: I0123 09:15:06.899925 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/metallb-system_frr-k8s-845hr_20940568-aa3c-4567-8a76-8cc4508bf6ff/frr/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.192690 5102 scope.go:117] "RemoveContainer" containerID="701788de32001527bdea0aae171bd87191cf19cee4eb7adf8440d1227e9e9843" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.351874 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/util/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.545967 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/util/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.583974 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/pull/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.625426 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/pull/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.761108 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/util/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.829826 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/extract/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.846172 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931awngv9_5c18ca3f-e8c7-4bf1-948f-9ae9a1f4da5f/pull/0.log" Jan 23 09:15:18 crc kubenswrapper[5102]: I0123 09:15:18.985703 5102 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.150362 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.180743 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.186667 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.367557 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.374538 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.383799 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_270996307cd21d144be796860235064b5127c2fcf62ccccd6689c259dcnhf56_9d7a9743-8694-4eb1-8a4a-75d0264cc0a9/extract/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.549171 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.741765 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.761860 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.765129 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.921080 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/util/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.937006 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/pull/0.log" Jan 23 09:15:19 crc kubenswrapper[5102]: I0123 09:15:19.970189 5102 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_53efe8611d43ac2275911d954e05efbbba7920a530aff9253ed1cec713hdb6w_dbbb0c70-8da6-4acc-a92d-f19a08611e94/extract/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.089971 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-utilities/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.250258 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-content/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.267558 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-utilities/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.274153 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-content/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.421939 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-content/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.424226 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/extract-utilities/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.560689 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_certified-operators-xk4wh_f8e568f4-9641-4e10-9872-d15287b92d47/registry-server/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.596040 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-utilities/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.598572 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.813298 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-content/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.819156 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-content/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.857717 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-utilities/0.log" Jan 23 09:15:20 crc kubenswrapper[5102]: I0123 09:15:20.969407 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-utilities/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.008350 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/extract-content/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.194384 5102 log.go:25] "Finished parsing log file" 
path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/3.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.392275 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_marketplace-operator-79b997595-wkhpj_6273c4aa-9895-47ea-a3d6-9ac16123a30f/marketplace-operator/2.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.493060 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-utilities/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.701712 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-utilities/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.750340 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-content/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.750375 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-content/0.log" Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.767469 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"eefe21146d375ba03998d52a2db810b8bfd92037243ff7f7562574bffaa74813"} Jan 23 09:15:21 crc kubenswrapper[5102]: I0123 09:15:21.996159 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-content/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.010002 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/extract-utilities/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.053633 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_community-operators-czpf6_e2c8263d-fc51-48be-80e5-284ebef0b5e2/registry-server/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.259050 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-utilities/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.324148 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-marketplace-92s74_e68a118c-0a6c-48fb-837a-b19bb6d00b0b/registry-server/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.430111 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-utilities/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.435478 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-content/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.470976 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-content/0.log" Jan 23 
09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.627220 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-utilities/0.log" Jan 23 09:15:22 crc kubenswrapper[5102]: I0123 09:15:22.627276 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/extract-content/0.log" Jan 23 09:15:23 crc kubenswrapper[5102]: I0123 09:15:23.557314 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-marketplace_redhat-operators-szq9g_229c9d86-29ca-4872-8819-231653c292de/registry-server/0.log" Jan 23 09:16:37 crc kubenswrapper[5102]: I0123 09:16:37.332134 5102 generic.go:334] "Generic (PLEG): container finished" podID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerID="24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2" exitCode=0 Jan 23 09:16:37 crc kubenswrapper[5102]: I0123 09:16:37.332225 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" event={"ID":"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8","Type":"ContainerDied","Data":"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2"} Jan 23 09:16:37 crc kubenswrapper[5102]: I0123 09:16:37.333122 5102 scope.go:117] "RemoveContainer" containerID="24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2" Jan 23 09:16:37 crc kubenswrapper[5102]: I0123 09:16:37.607909 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vf7m2_must-gather-xqm8x_69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8/gather/0.log" Jan 23 09:16:45 crc kubenswrapper[5102]: I0123 09:16:45.556268 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-vf7m2/must-gather-xqm8x"] Jan 23 09:16:45 crc kubenswrapper[5102]: I0123 09:16:45.557043 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="copy" containerID="cri-o://5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f" gracePeriod=2 Jan 23 09:16:45 crc kubenswrapper[5102]: I0123 09:16:45.561952 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-vf7m2/must-gather-xqm8x"] Jan 23 09:16:45 crc kubenswrapper[5102]: I0123 09:16:45.989613 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vf7m2_must-gather-xqm8x_69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8/copy/0.log" Jan 23 09:16:45 crc kubenswrapper[5102]: I0123 09:16:45.990361 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.079596 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x5jh8\" (UniqueName: \"kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8\") pod \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.079711 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output\") pod \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\" (UID: \"69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8\") " Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.086785 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8" (OuterVolumeSpecName: "kube-api-access-x5jh8") pod "69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" (UID: "69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8"). InnerVolumeSpecName "kube-api-access-x5jh8". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.182843 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x5jh8\" (UniqueName: \"kubernetes.io/projected/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-kube-api-access-x5jh8\") on node \"crc\" DevicePath \"\"" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.258139 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" (UID: "69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.285204 5102 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8-must-gather-output\") on node \"crc\" DevicePath \"\"" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.398692 5102 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-vf7m2_must-gather-xqm8x_69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8/copy/0.log" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.399154 5102 generic.go:334] "Generic (PLEG): container finished" podID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerID="5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f" exitCode=143 Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.399220 5102 scope.go:117] "RemoveContainer" containerID="5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.399228 5102 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-vf7m2/must-gather-xqm8x" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.420560 5102 scope.go:117] "RemoveContainer" containerID="24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.492039 5102 scope.go:117] "RemoveContainer" containerID="5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f" Jan 23 09:16:46 crc kubenswrapper[5102]: E0123 09:16:46.492496 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f\": container with ID starting with 5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f not found: ID does not exist" containerID="5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.492626 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f"} err="failed to get container status \"5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f\": rpc error: code = NotFound desc = could not find container \"5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f\": container with ID starting with 5cee9c7f41525fd0e35661a4a84d643f0d380b3c49c7774ccce0118ae2805c8f not found: ID does not exist" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.492746 5102 scope.go:117] "RemoveContainer" containerID="24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2" Jan 23 09:16:46 crc kubenswrapper[5102]: E0123 09:16:46.493244 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2\": container with ID starting with 24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2 not found: ID does not exist" containerID="24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2" Jan 23 09:16:46 crc kubenswrapper[5102]: I0123 09:16:46.493277 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2"} err="failed to get container status \"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2\": rpc error: code = NotFound desc = could not find container \"24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2\": container with ID starting with 24023dd9944fba666364cead04a4de6f93617ad1a70b6635e8a0af1dd867ffd2 not found: ID does not exist" Jan 23 09:16:47 crc kubenswrapper[5102]: I0123 09:16:47.607228 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" path="/var/lib/kubelet/pods/69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8/volumes" Jan 23 09:17:46 crc kubenswrapper[5102]: I0123 09:17:46.769059 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:17:46 crc kubenswrapper[5102]: I0123 09:17:46.769732 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" 
podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:18:16 crc kubenswrapper[5102]: I0123 09:18:16.768944 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:18:16 crc kubenswrapper[5102]: I0123 09:18:16.769532 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:18:46 crc kubenswrapper[5102]: I0123 09:18:46.768295 5102 patch_prober.go:28] interesting pod/machine-config-daemon-vnmgh container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Jan 23 09:18:46 crc kubenswrapper[5102]: I0123 09:18:46.768848 5102 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Jan 23 09:18:46 crc kubenswrapper[5102]: I0123 09:18:46.768889 5102 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" Jan 23 09:18:46 crc kubenswrapper[5102]: I0123 09:18:46.770708 5102 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"eefe21146d375ba03998d52a2db810b8bfd92037243ff7f7562574bffaa74813"} pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Jan 23 09:18:46 crc kubenswrapper[5102]: I0123 09:18:46.771218 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" podUID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerName="machine-config-daemon" containerID="cri-o://eefe21146d375ba03998d52a2db810b8bfd92037243ff7f7562574bffaa74813" gracePeriod=600 Jan 23 09:18:47 crc kubenswrapper[5102]: I0123 09:18:47.291276 5102 generic.go:334] "Generic (PLEG): container finished" podID="04f943d6-91c5-4493-b310-de0b8ef7966e" containerID="eefe21146d375ba03998d52a2db810b8bfd92037243ff7f7562574bffaa74813" exitCode=0 Jan 23 09:18:47 crc kubenswrapper[5102]: I0123 09:18:47.291382 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerDied","Data":"eefe21146d375ba03998d52a2db810b8bfd92037243ff7f7562574bffaa74813"} Jan 23 09:18:47 crc kubenswrapper[5102]: I0123 09:18:47.291710 5102 scope.go:117] "RemoveContainer" containerID="55cc3e3025847527b5a64affc9aa3739f7cec8a1b2ab82965d337fabc9625066" Jan 23 09:18:48 crc kubenswrapper[5102]: I0123 
09:18:48.300035 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-vnmgh" event={"ID":"04f943d6-91c5-4493-b310-de0b8ef7966e","Type":"ContainerStarted","Data":"e510bd465102c07991f70969dee38c550ae4392166518dadf661d0c42d66d797"} Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.960918 5102 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:32 crc kubenswrapper[5102]: E0123 09:19:32.961759 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="copy" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.961774 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="copy" Jan 23 09:19:32 crc kubenswrapper[5102]: E0123 09:19:32.961806 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="gather" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.961815 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="gather" Jan 23 09:19:32 crc kubenswrapper[5102]: E0123 09:19:32.961827 5102 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0563993f-d6fa-4bf4-996c-13c2abdc84e2" containerName="collect-profiles" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.961838 5102 state_mem.go:107] "Deleted CPUSet assignment" podUID="0563993f-d6fa-4bf4-996c-13c2abdc84e2" containerName="collect-profiles" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.962092 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="copy" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.962116 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="69a0b1fd-6195-4b14-ad6d-2beb3a2a0be8" containerName="gather" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.962135 5102 memory_manager.go:354] "RemoveStaleState removing state" podUID="0563993f-d6fa-4bf4-996c-13c2abdc84e2" containerName="collect-profiles" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.964123 5102 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:32 crc kubenswrapper[5102]: I0123 09:19:32.972898 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.164946 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.165042 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.165083 5102 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rbqxl\" (UniqueName: \"kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.265967 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.266037 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.266074 5102 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rbqxl\" (UniqueName: \"kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.266802 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.266866 5102 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.295485 5102 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"kube-api-access-rbqxl\" (UniqueName: \"kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl\") pod \"redhat-marketplace-h7rg6\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.592098 5102 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:33 crc kubenswrapper[5102]: I0123 09:19:33.894045 5102 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:34 crc kubenswrapper[5102]: I0123 09:19:34.617513 5102 generic.go:334] "Generic (PLEG): container finished" podID="115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" containerID="9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad" exitCode=0 Jan 23 09:19:34 crc kubenswrapper[5102]: I0123 09:19:34.617601 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerDied","Data":"9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad"} Jan 23 09:19:34 crc kubenswrapper[5102]: I0123 09:19:34.620188 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerStarted","Data":"e39eee439187bb3fbd331c931326bf172bde54223b23feb2e170d5275b29dc1d"} Jan 23 09:19:34 crc kubenswrapper[5102]: I0123 09:19:34.622271 5102 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Jan 23 09:19:36 crc kubenswrapper[5102]: I0123 09:19:36.636266 5102 generic.go:334] "Generic (PLEG): container finished" podID="115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" containerID="ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a" exitCode=0 Jan 23 09:19:36 crc kubenswrapper[5102]: I0123 09:19:36.636301 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerDied","Data":"ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a"} Jan 23 09:19:38 crc kubenswrapper[5102]: I0123 09:19:38.652239 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerStarted","Data":"eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782"} Jan 23 09:19:38 crc kubenswrapper[5102]: I0123 09:19:38.676264 5102 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-h7rg6" podStartSLOduration=3.164545354 podStartE2EDuration="6.676241901s" podCreationTimestamp="2026-01-23 09:19:32 +0000 UTC" firstStartedPulling="2026-01-23 09:19:34.621739587 +0000 UTC m=+8725.442088562" lastFinishedPulling="2026-01-23 09:19:38.133436124 +0000 UTC m=+8728.953785109" observedRunningTime="2026-01-23 09:19:38.669303705 +0000 UTC m=+8729.489652680" watchObservedRunningTime="2026-01-23 09:19:38.676241901 +0000 UTC m=+8729.496590876" Jan 23 09:19:43 crc kubenswrapper[5102]: I0123 09:19:43.593059 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:43 crc kubenswrapper[5102]: I0123 09:19:43.593640 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" 
status="unhealthy" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:43 crc kubenswrapper[5102]: I0123 09:19:43.635534 5102 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:43 crc kubenswrapper[5102]: I0123 09:19:43.723152 5102 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:43 crc kubenswrapper[5102]: I0123 09:19:43.881153 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:45 crc kubenswrapper[5102]: I0123 09:19:45.693232 5102 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-h7rg6" podUID="115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" containerName="registry-server" containerID="cri-o://eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782" gracePeriod=2 Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.095839 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.166885 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rbqxl\" (UniqueName: \"kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl\") pod \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.166947 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content\") pod \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.167034 5102 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities\") pod \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\" (UID: \"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8\") " Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.168236 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities" (OuterVolumeSpecName: "utilities") pod "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" (UID: "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.173568 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl" (OuterVolumeSpecName: "kube-api-access-rbqxl") pod "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" (UID: "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8"). InnerVolumeSpecName "kube-api-access-rbqxl". PluginName "kubernetes.io/projected", VolumeGidValue "" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.241114 5102 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" (UID: "115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.268951 5102 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rbqxl\" (UniqueName: \"kubernetes.io/projected/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-kube-api-access-rbqxl\") on node \"crc\" DevicePath \"\"" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.268994 5102 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-catalog-content\") on node \"crc\" DevicePath \"\"" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.269009 5102 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8-utilities\") on node \"crc\" DevicePath \"\"" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.701749 5102 generic.go:334] "Generic (PLEG): container finished" podID="115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" containerID="eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782" exitCode=0 Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.701815 5102 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-h7rg6" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.701832 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerDied","Data":"eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782"} Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.702165 5102 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-h7rg6" event={"ID":"115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8","Type":"ContainerDied","Data":"e39eee439187bb3fbd331c931326bf172bde54223b23feb2e170d5275b29dc1d"} Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.702187 5102 scope.go:117] "RemoveContainer" containerID="eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.722003 5102 scope.go:117] "RemoveContainer" containerID="ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.740073 5102 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.744682 5102 scope.go:117] "RemoveContainer" containerID="9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.745550 5102 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-h7rg6"] Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.766228 5102 scope.go:117] "RemoveContainer" containerID="eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782" Jan 23 09:19:46 crc kubenswrapper[5102]: E0123 09:19:46.766712 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782\": container with ID starting with eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782 not found: ID does not exist" containerID="eb02036ba64383804130f8ccd787c9cfbf5f44333e7cddcbe035c41bb450d782" Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.766771 5102 
Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.766811 5102 scope.go:117] "RemoveContainer" containerID="ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a"
Jan 23 09:19:46 crc kubenswrapper[5102]: E0123 09:19:46.767301 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a\": container with ID starting with ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a not found: ID does not exist" containerID="ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a"
Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.767348 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a"} err="failed to get container status \"ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a\": rpc error: code = NotFound desc = could not find container \"ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a\": container with ID starting with ec86dd3bd0fad4741601fe5a0450402d3fdda5ddcabd1e7088ebd002cd41304a not found: ID does not exist"
Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.767378 5102 scope.go:117] "RemoveContainer" containerID="9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad"
Jan 23 09:19:46 crc kubenswrapper[5102]: E0123 09:19:46.767695 5102 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad\": container with ID starting with 9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad not found: ID does not exist" containerID="9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad"
Jan 23 09:19:46 crc kubenswrapper[5102]: I0123 09:19:46.767726 5102 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad"} err="failed to get container status \"9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad\": rpc error: code = NotFound desc = could not find container \"9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad\": container with ID starting with 9f6b1c98f15b69de9a5bcdb8f9d54099959e917d8c32deef50f2b581beb67fad not found: ID does not exist"
Jan 23 09:19:47 crc kubenswrapper[5102]: I0123 09:19:47.805695 5102 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8" path="/var/lib/kubelet/pods/115c4fb3-e6f1-4c29-ad5c-3c1c302c35e8/volumes"
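[annotation] The final entry confirms the terminal cleanup step: once every volume is unmounted, the kubelet removes /var/lib/kubelet/pods/<podUID>/volumes, leaving nothing behind for this pod UID. A stand-alone way to spot-check a node for pod directories that still hold a volumes/ subtree (the path layout is taken from the log line above; the helper itself is hypothetical):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	root := "/var/lib/kubelet/pods"
	entries, err := os.ReadDir(root)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	for _, e := range entries {
		if !e.IsDir() {
			continue // pod directories are named by pod UID
		}
		vols := filepath.Join(root, e.Name(), "volumes")
		if _, err := os.Stat(vols); err == nil {
			fmt.Println("volumes dir still present:", vols)
		}
	}
}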
var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515134636537024462 0ustar coreroot
var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015134636537017377 5ustar coreroot
var/home/core/zuul-output/artifacts/0000755000175000017500000000000015134614501016505 5ustar corecore
var/home/core/zuul-output/docs/0000755000175000017500000000000015134614501015455 5ustar corecore